diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 922977c03a4..bc75881292f 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -22,8 +22,14 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import java.io.*; -import java.util.*; +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -34,8 +40,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.RegionLoad; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -47,8 +53,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.ClassLoaderTestHelper; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.*; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.ClassRule; +import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -482,13 +490,13 @@ public class TestClassLoading { * @param tableName : given table. * @return subset of all servers. */ - Map serversForTable(String tableName) { - Map serverLoadHashMap = new HashMap<>(); - for(Map.Entry server: + Map serversForTable(String tableName) { + Map serverLoadHashMap = new HashMap<>(); + for(Map.Entry server: TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). getOnlineServers().entrySet()) { - for( Map.Entry region: - server.getValue().getRegionsLoad().entrySet()) { + for(Map.Entry region: + server.getValue().getRegionMetrics().entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { // this server hosts a region of tableName: add this server.. 
serverLoadHashMap.put(server.getKey(),server.getValue()); @@ -501,8 +509,7 @@ public class TestClassLoading { } void assertAllRegionServers(String tableName) throws InterruptedException { - Map servers; - String[] actualCoprocessors = null; + Map servers; boolean success = false; String[] expectedCoprocessors = regionServerSystemCoprocessors; if (tableName == null) { @@ -513,8 +520,9 @@ public class TestClassLoading { } for (int i = 0; i < 5; i++) { boolean any_failed = false; - for(Map.Entry server: servers.entrySet()) { - actualCoprocessors = server.getValue().getRsCoprocessors(); + for(Map.Entry server: servers.entrySet()) { + String[] actualCoprocessors = + server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index 4685c011093..d6df9109fdb 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -124,8 +124,8 @@ public class TestRSGroupsOfflineMode { LOG.info("Waiting for region unassignments on failover RS..."); TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return master.getServerManager().getLoad(failoverRS.getServerName()) - .getRegionsLoad().size() > 0; + return !master.getServerManager().getLoad(failoverRS.getServerName()) + .getRegionMetrics().isEmpty(); } }); } diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon index 9f9831f7d5b..9a0e36916f1 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon @@ -30,7 +30,9 @@ ServerManager serverManager; java.util.Set; java.util.stream.Collectors; org.apache.hadoop.hbase.master.HMaster; - org.apache.hadoop.hbase.ServerLoad; + org.apache.hadoop.hbase.RegionMetrics; + org.apache.hadoop.hbase.ServerMetrics; + org.apache.hadoop.hbase.Size; org.apache.hadoop.hbase.RSGroupTableAccessor; org.apache.hadoop.hbase.master.ServerManager; org.apache.hadoop.hbase.net.Address; @@ -45,7 +47,7 @@ List groups = RSGroupTableAccessor.getAllRSGroupInfo(master.getConn <%java> RSGroupInfo [] rsGroupInfos = groups.toArray(new RSGroupInfo[groups.size()]); -Map collectServers = Collections.emptyMap(); +Map collectServers = Collections.emptyMap(); if (master.getServerManager() != null) { collectServers = master.getServerManager().getOnlineServers().entrySet().stream() @@ -85,7 +87,7 @@ if (master.getServerManager() != null) { <%def rsgroup_baseStats> <%args> RSGroupInfo [] rsGroupInfos; - Map collectServers; + Map collectServers; @@ -112,13 +114,13 @@ if (master.getServerManager() != null) { int numRegionsOnline = 0; Set
<Address> servers = rsGroupInfo.getServers(); for (Address server : servers) { - ServerLoad sl = collectServers.get(server); + ServerMetrics sl = collectServers.get(server); if (sl != null) { - requestsPerSecond += sl.getNumberOfRequests(); - numRegionsOnline += sl.getNumberOfRegions(); + requestsPerSecond += sl.getRequestCountPerSecond(); + numRegionsOnline += sl.getRegionMetrics().size(); //rsgroup total - totalRegions += sl.getNumberOfRegions(); - totalRequests += sl.getNumberOfRequests(); + totalRegions += sl.getRegionMetrics().size(); + totalRequests += sl.getRequestCountPerSecond(); totalOnlineServers++; onlineServers++; } else { @@ -157,7 +159,7 @@ if (master.getServerManager() != null) { <%def rsgroup_memoryStats> <%args> RSGroupInfo [] rsGroupInfos; - Map<Address, ServerLoad> collectServers; + Map<Address, ServerMetrics> collectServers;
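The baseStats hunks above drop ServerLoad's rolled-up counters in favour of ServerMetrics accessors. A minimal sketch of the mapping in plain Java (class and method names here are illustrative, not part of the patch):

```java
import java.util.Map;

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;

// Sketch only -- the class name is illustrative, not part of the patch.
final class GroupStatsSketch {
  // ServerLoad.getNumberOfRegions() has no direct replacement; the region
  // count now falls out of the per-region metrics map.
  static int regionsOnline(ServerMetrics sm) {
    Map<byte[], RegionMetrics> regions = sm.getRegionMetrics();
    return regions.size();
  }

  // ServerLoad.getNumberOfRequests() maps to getRequestCountPerSecond();
  // the lifetime total maps to getRequestCount(), as the MasterRpcServices
  // hunk below shows.
  static long requestsPerSecond(ServerMetrics sm) {
    return sm.getRequestCountPerSecond();
  }
}
```

The same two substitutions recur in every template and balancer hunk that follows.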
@@ -174,11 +176,12 @@ if (master.getServerManager() != null) { long maxHeap = 0; long memstoreSize = 0; for (Address server : rsGroupInfo.getServers()) { - ServerLoad sl = collectServers.get(server); + ServerMetrics sl = collectServers.get(server); if (sl != null) { - usedHeap += sl.getUsedHeapMB(); - maxHeap += sl.getMaxHeapMB(); - memstoreSize += sl.getMemstoreSizeInMB(); + usedHeap += (long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE); + maxHeap += (long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE); + memstoreSize += (long) sl.getRegionMetrics().values().stream().mapToDouble( + rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)).sum(); } } @@ -201,7 +204,7 @@ if (master.getServerManager() != null) { <%def rsgroup_requestStats> <%args> RSGroupInfo [] rsGroupInfos; - Map<Address, ServerLoad> collectServers; + Map<Address, ServerMetrics> collectServers;
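The memory hunk is the first place the patch meets the new Size value type: MB/KB-typed getters are replaced by Size objects that convert on request. A small sketch of that pattern, assuming only the hbase-client API visible in this diff (class name illustrative):

```java
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.Size;

// Sketch only -- the class name is illustrative, not part of the patch.
final class HeapStatsSketch {
  // The old *MB getters are gone; Size.get(Unit) converts to the requested
  // unit and returns a double, hence the explicit (long) truncation that
  // the templates apply.
  static long usedHeapMB(ServerMetrics sm) {
    return (long) sm.getUsedHeapSize().get(Size.Unit.MEGABYTE);
  }

  // No server-level memstore getter remains, so sum the per-region sizes.
  static long memStoreMB(ServerMetrics sm) {
    return (long) sm.getRegionMetrics().values().stream()
        .mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
        .sum();
  }
}
```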
@@ -217,11 +220,13 @@ if (master.getServerManager() != null) { long readRequests = 0; long writeRequests = 0; for (Address server : rsGroupInfo.getServers()) { - ServerLoad sl = collectServers.get(server); + ServerMetrics sl = collectServers.get(server); if (sl != null) { - requestsPerSecond += sl.getNumberOfRequests(); - readRequests += sl.getReadRequestsCount(); - writeRequests += sl.getWriteRequestsCount(); + for (RegionMetrics rm : sl.getRegionMetrics().values()) { + readRequests += rm.getReadRequestCount(); + writeRequests += rm.getWriteRequestCount(); + } + requestsPerSecond += sl.getRequestCountPerSecond(); } } @@ -241,7 +246,7 @@ if (master.getServerManager() != null) { <%def rsgroup_storeStats> <%args> RSGroupInfo [] rsGroupInfos; - Map<Address, ServerLoad> collectServers; + Map<Address, ServerMetrics> collectServers;
@@ -264,14 +269,16 @@ if (master.getServerManager() != null) { long bloomSize = 0; int count = 0; for (Address server : rsGroupInfo.getServers()) { - ServerLoad sl = collectServers.get(server); + ServerMetrics sl = collectServers.get(server); if (sl != null) { - numStores += sl.getStores(); - numStorefiles += sl.getStorefiles(); - uncompressedStorefileSize += sl.getStoreUncompressedSizeMB(); - storefileSize += sl.getStorefileSizeInMB(); - indexSize += sl.getTotalStaticIndexSizeKB(); - bloomSize += sl.getTotalStaticBloomSizeKB(); + for (RegionMetrics rm : sl.getRegionMetrics().values()) { + numStores += rm.getStoreCount(); + numStorefiles += rm.getStoreFileCount(); + uncompressedStorefileSize += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storefileSize += rm.getStoreFileSize().get(Size.Unit.MEGABYTE); + indexSize += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + bloomSize += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE); + } count++; } } @@ -298,7 +305,7 @@ if (master.getServerManager() != null) { <%def rsgroup_compactStats> <%args> RSGroupInfo [] rsGroupInfos; - Map<Address, ServerLoad> collectServers; + Map<Address, ServerMetrics> collectServers;
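The request and store hunks above share one idea: totals that ServerLoad pre-aggregated are now rebuilt by walking the per-region metrics. Sketched once, under the same illustrative-naming caveat:

```java
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.Size;

// Sketch only -- the class name is illustrative, not part of the patch.
final class PerRegionRollupSketch {
  long stores, storeFiles, readRequests, writeRequests;
  double storeFileSizeMB;

  // ServerLoad used to hand these back pre-aggregated; ServerMetrics
  // callers derive them from RegionMetrics, exactly as the jamon hunks do.
  void add(ServerMetrics sm) {
    for (RegionMetrics rm : sm.getRegionMetrics().values()) {
      stores += rm.getStoreCount();
      storeFiles += rm.getStoreFileCount();
      readRequests += rm.getReadRequestCount();
      writeRequests += rm.getWriteRequestCount();
      storeFileSizeMB += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
    }
  }
}
```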
@@ -312,28 +319,30 @@ if (master.getServerManager() != null) { for (RSGroupInfo rsGroupInfo: rsGroupInfos) { String rsGroupName = rsGroupInfo.getName(); int numStores = 0; - long totalCompactingKVs = 0; - long numCompactedKVs = 0; + long totalCompactingCells = 0; + long totalCompactedCells = 0; long remainingKVs = 0; long compactionProgress = 0; for (Address server : rsGroupInfo.getServers()) { - ServerLoad sl = collectServers.get(server); + ServerMetrics sl = collectServers.get(server); if (sl != null) { - totalCompactingKVs += sl.getTotalCompactingKVs(); - numCompactedKVs += sl.getCurrentCompactedKVs(); + for (RegionMetrics rl : sl.getRegionMetrics().values()) { + totalCompactingCells += rl.getCompactingCellCount(); + totalCompactedCells += rl.getCompactedCellCount(); + } } } - remainingKVs = totalCompactingKVs - numCompactedKVs; + remainingKVs = totalCompactingCells - totalCompactedCells; String percentDone = ""; - if (totalCompactingKVs > 0) { + if (totalCompactingCells > 0) { percentDone = String.format("%.2f", 100 * - ((float) numCompactedKVs / totalCompactingKVs)) + "%"; + ((float) totalCompactedCells / totalCompactingCells)) + "%"; } - - + + diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon index 0b5599daf33..fb7dd54d0b3 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon @@ -26,8 +26,10 @@ HMaster master; <%import> java.util.*; org.apache.hadoop.hbase.master.HMaster; - org.apache.hadoop.hbase.ServerLoad; + org.apache.hadoop.hbase.RegionMetrics; + org.apache.hadoop.hbase.ServerMetrics; org.apache.hadoop.hbase.ServerName; + org.apache.hadoop.hbase.Size; org.apache.hadoop.hbase.util.VersionInfo; org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; @@ -84,12 +86,12 @@ Arrays.sort(serverNames); <%java> int totalRegions = 0; - int totalRequests = 0; + int totalRequestsPerSecond = 0; int inconsistentNodeNum = 0; String masterVersion = VersionInfo.getVersion(); for (ServerName serverName: serverNames) { - ServerLoad sl = master.getServerManager().getLoad(serverName); + ServerMetrics sl = master.getServerManager().getLoad(serverName); String version = master.getRegionServerVersion(serverName); if (!masterVersion.equals(version)) { inconsistentNodeNum ++; @@ -100,12 +102,11 @@ Arrays.sort(serverNames); long lastContact = 0; if (sl != null) { - requestsPerSecond = sl.getRequestsPerSecond(); - numRegionsOnline = sl.getNumberOfRegions(); - totalRegions += sl.getNumberOfRegions(); - // Is this correct? Adding a rate to a measure. - totalRequests += sl.getNumberOfRequests(); - lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000; + requestsPerSecond = sl.getRequestCountPerSecond(); + numRegionsOnline = sl.getRegionMetrics().size(); + totalRegions += sl.getRegionMetrics().size(); + totalRequestsPerSecond += sl.getRequestCountPerSecond(); + lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000; } long startcode = serverName.getStartcode(); @@ -128,7 +129,7 @@ Arrays.sort(serverNames); <%else> - +
<td><& rsGroupLink; rsGroupName=rsGroupName; &></td> - <td><% totalCompactingKVs %></td> - <td><% numCompactedKVs %></td> + <td><% totalCompactingCells %></td> + <td><% totalCompactedCells %></td> <td><% remainingKVs %></td> <td><% percentDone %></td>
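Compaction progress follows the same per-region pattern, with the KV counters renamed to cell counts. A sketch of the percent-done computation both templates use (class name illustrative):

```java
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;

// Sketch only -- the class name is illustrative, not part of the patch.
final class CompactionProgressSketch {
  static String percentDone(ServerMetrics sm) {
    long compacting = 0;
    long compacted = 0;
    for (RegionMetrics rm : sm.getRegionMetrics().values()) {
      compacting += rm.getCompactingCellCount();
      compacted += rm.getCompactedCellCount();
    }
    // Same guard as the templates: leave the cell blank when nothing is
    // compacting, so we never divide by zero.
    return compacting > 0
        ? String.format("%.2f%%", 100 * ((float) compacted / compacting))
        : "";
  }
}
```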
- <td><% totalRequests %></td> + <td><% totalRequestsPerSecond %></td> <td><% totalRegions %></td>
@@ -149,16 +150,20 @@ Arrays.sort(serverNames); <%java> for (ServerName serverName: serverNames) { - ServerLoad sl = master.getServerManager().getLoad(serverName); + ServerMetrics sl = master.getServerManager().getLoad(serverName); if (sl != null) { + long memStoreSizeMB = 0; + for (RegionMetrics rl : sl.getRegionMetrics().values()) { + memStoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE); + } <& serverNameLink; serverName=serverName; serverLoad = sl; &> - <% TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB() + <% TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE) * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB() + <% TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE) * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <% TraditionalBinaryPrefix.long2String(sl.getMemStoreSizeMB() + <% TraditionalBinaryPrefix.long2String(memStoreSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> @@ -189,15 +194,23 @@ for (ServerName serverName: serverNames) { <%java> for (ServerName serverName: serverNames) { -ServerLoad sl = master.getServerManager().getLoad(serverName); +ServerMetrics sl = master.getServerManager().getLoad(serverName); if (sl != null) { + long readRequestCount = 0; + long writeRequestCount = 0; + long filteredReadRequestCount = 0; + for (RegionMetrics rl : sl.getRegionMetrics().values()) { + readRequestCount += rl.getReadRequestCount(); + writeRequestCount += rl.getWriteRequestCount(); + filteredReadRequestCount += rl.getFilteredReadRequestCount(); + } <& serverNameLink; serverName=serverName; serverLoad = sl; &> -<% String.format("%.0f", sl.getRequestsPerSecond()) %> -<% sl.getReadRequestsCount() %> -<% sl.getFilteredReadRequestsCount() %> -<% sl.getWriteRequestsCount() %> +<% sl.getRequestCountPerSecond() %> +<% readRequestCount %> +<% filteredReadRequestCount %> +<% writeRequestCount %> <%java> } else { @@ -228,20 +241,34 @@ if (sl != null) { <%java> for (ServerName serverName: serverNames) { -ServerLoad sl = master.getServerManager().getLoad(serverName); +ServerMetrics sl = master.getServerManager().getLoad(serverName); if (sl != null) { + long storeCount = 0; + long storeFileCount = 0; + long storeUncompressedSizeMB = 0; + long storeFileSizeMB = 0; + long totalStaticIndexSizeKB = 0; + long totalStaticBloomSizeKB = 0; + for (RegionMetrics rl : sl.getRegionMetrics().values()) { + storeCount += rl.getStoreCount(); + storeFileCount += rl.getStoreFileCount(); + storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storeFileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE); + totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); + } <& serverNameLink; serverName=serverName; serverLoad = sl; &> -<% sl.getStores() %> -<% sl.getStorefiles() %> +<% storeCount %> +<% storeFileCount %> <% TraditionalBinaryPrefix.long2String( - sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> -<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB() + storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> +<% TraditionalBinaryPrefix.long2String(storeFileSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> -<% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB() +<% TraditionalBinaryPrefix.long2String(totalStaticIndexSizeKB * 
TraditionalBinaryPrefix.KILO.value, "B", 1) %> -<% TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB() +<% TraditionalBinaryPrefix.long2String(totalStaticBloomSizeKB * TraditionalBinaryPrefix.KILO.value, "B", 1) %> <%java> @@ -270,19 +297,25 @@ if (sl != null) { <%java> for (ServerName serverName: serverNames) { -ServerLoad sl = master.getServerManager().getLoad(serverName); +ServerMetrics sl = master.getServerManager().getLoad(serverName); if (sl != null) { +long totalCompactingCells = 0; +long totalCompactedCells = 0; +for (RegionMetrics rl : sl.getRegionMetrics().values()) { + totalCompactingCells += rl.getCompactingCellCount(); + totalCompactedCells += rl.getCompactedCellCount(); +} String percentDone = ""; -if (sl.getTotalCompactingKVs() > 0) { +if (totalCompactingCells > 0) { percentDone = String.format("%.2f", 100 * - ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%"; + ((float) totalCompactedCells / totalCompactingCells)) + "%"; } <& serverNameLink; serverName=serverName; serverLoad = sl; &> -<% sl.getTotalCompactingKVs() %> -<% sl.getCurrentCompactedKVs() %> -<% sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %> +<% totalCompactingCells %> +<% totalCompactedCells %> +<% totalCompactingCells - totalCompactedCells %> <% percentDone %> <%java> @@ -300,7 +333,7 @@ if (sl.getTotalCompactingKVs() > 0) { <%def serverNameLink> <%args> ServerName serverName; - ServerLoad serverLoad; + ServerMetrics serverLoad; <%java> int infoPort = master.getRegionServerInfoPort(serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index 81aa12dfe92..686939016d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -28,11 +28,10 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -46,6 +45,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @@ -132,12 +132,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } //the region is currently on none of the favored nodes //get it on one of them if possible - ServerLoad l1 = super.services.getServerManager().getLoad( + ServerMetrics l1 = super.services.getServerManager().getLoad( serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); - ServerLoad l2 = super.services.getServerManager().getLoad( + ServerMetrics l2 = super.services.getServerManager().getLoad( serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); if (l1 != null && l2 != null) { - if (l1.getLoad() > l2.getLoad()) { + if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) { destination = 
serverNameWithoutCodeToServerName.get(favoredNodes.get(2)); } else { destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1)); @@ -296,9 +296,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored // assign the region to the one with a lower load // (both have the desired hdfs blocks) ServerName s; - ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); - ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); - if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) { + ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); + ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); + if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) { s = secondaryHost; } else { s = tertiaryHost; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java index 262c59ebf54..0dd50ff4c18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java @@ -24,13 +24,10 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.util.Date; import java.util.Map; - import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; @@ -39,6 +36,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.regionserver.RSDumpServlet; import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MasterDumpServlet extends StateDumpServlet { @@ -132,8 +130,8 @@ public class MasterDumpServlet extends StateDumpServlet { return; } - Map servers = sm.getOnlineServers(); - for (Map.Entry e : servers.entrySet()) { + Map servers = sm.getOnlineServers(); + for (Map.Entry e : servers.entrySet()) { out.println(e.getKey() + ": " + e.getValue()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index b4f0faffb63..8f92041f6bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -37,7 +37,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerMetrics; +import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; @@ -102,6 +103,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; 
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -451,16 +453,16 @@ public class MasterRpcServices extends RSRpcServices master.checkServiceStarted(); ClusterStatusProtos.ServerLoad sl = request.getLoad(); ServerName serverName = ProtobufUtil.toServerName(request.getServer()); - ServerLoad oldLoad = master.getServerManager().getLoad(serverName); - ServerLoad newLoad = new ServerLoad(serverName, sl); + ServerMetrics oldLoad = master.getServerManager().getLoad(serverName); + ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, sl); master.getServerManager().regionServerReport(serverName, newLoad); int version = VersionInfoUtil.getCurrentClientVersionNumber(); master.getAssignmentManager().reportOnlineRegions(serverName, - version, newLoad.getRegionsLoad().keySet()); + version, newLoad.getRegionMetrics().keySet()); if (sl != null && master.metricsMaster != null) { // Up our metrics. master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests() - - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0)); + - (oldLoad != null ? oldLoad.getRequestCount() : 0)); } } catch (IOException ioe) { throw new ServiceException(ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index dbcce1d408c..06d6c8b7279 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -37,13 +37,12 @@ import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RegionLoad; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; @@ -62,8 +61,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; @@ -124,7 +125,8 @@ public class ServerManager { storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); /** Map of registered servers to their current load */ - private final ConcurrentNavigableMap onlineServers = new ConcurrentSkipListMap<>(); + private final ConcurrentNavigableMap onlineServers = + new ConcurrentSkipListMap<>(); 
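On the master side, the stored value type changes along with the map above: regionServerStartup records empty metrics and regionServerReport decodes the protobuf into a ServerMetrics. The two builder entry points the patch relies on, in isolation (class name illustrative):

```java
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;

// Sketch only -- the class name is illustrative, not part of the patch.
final class ReportConversionSketch {
  // A just-registered server has no report yet; the builder supplies
  // empty metrics (used by regionServerStartup above).
  static ServerMetrics onStartup(ServerName sn) {
    return ServerMetricsBuilder.of(sn);
  }

  // Each regionServerReport decodes the protobuf ServerLoad into an
  // immutable ServerMetrics before ServerManager stores it.
  static ServerMetrics onReport(ServerName sn, ClusterStatusProtos.ServerLoad sl) {
    return ServerMetricsBuilder.toServerMetrics(sn, sl);
  }
}
```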
/** * Map of admin interfaces per registered regionserver; these interfaces we use to control @@ -240,7 +242,7 @@ public class ServerManager { request.getServerStartCode()); checkClockSkew(sn, request.getServerCurrentTime()); checkIsDead(sn, "STARTUP"); - if (!checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) { + if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn))) { LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup" + " could not record the server: " + sn); } @@ -252,12 +254,11 @@ public class ServerManager { * @param sn * @param hsl */ - private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) { - Map regionsLoad = hsl.getRegionsLoad(); - for (Entry entry : regionsLoad.entrySet()) { + private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) { + for (Entry entry : hsl.getRegionMetrics().entrySet()) { byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey())); Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName); - long l = entry.getValue().getCompleteSequenceId(); + long l = entry.getValue().getCompletedSequenceId(); // Don't let smaller sequence ids override greater sequence ids. if (LOG.isTraceEnabled()) { LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue + @@ -273,10 +274,10 @@ public class ServerManager { ConcurrentNavigableMap storeFlushedSequenceId = computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName, () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR)); - for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) { - byte[] family = storeSeqId.getFamilyName().toByteArray(); + for (Entry storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) { + byte[] family = storeSeqId.getKey(); existingValue = storeFlushedSequenceId.get(family); - l = storeSeqId.getSequenceId(); + l = storeSeqId.getValue(); if (LOG.isTraceEnabled()) { LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) + ", existingValue=" + existingValue + ", completeSequenceId=" + l); @@ -291,7 +292,7 @@ public class ServerManager { @VisibleForTesting public void regionServerReport(ServerName sn, - ServerLoad sl) throws YouAreDeadException { + ServerMetrics sl) throws YouAreDeadException { checkIsDead(sn, "REPORT"); if (null == this.onlineServers.replace(sn, sl)) { // Already have this host+port combo and its just different start code? @@ -316,7 +317,7 @@ public class ServerManager { * @param sl the server load on the server * @return true if the server is recorded, otherwise, false */ - boolean checkAndRecordNewServer(final ServerName serverName, final ServerLoad sl) { + boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) { ServerName existingServer = null; synchronized (this.onlineServers) { existingServer = findServerWithSameHostnamePortWithLock(serverName); @@ -423,7 +424,7 @@ public class ServerManager { * @param serverName The remote servers name. 
*/ @VisibleForTesting - void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) { + void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) { LOG.info("Registering regionserver=" + serverName); this.onlineServers.put(serverName, sl); this.rsAdmins.remove(serverName); @@ -447,9 +448,9 @@ public class ServerManager { /** * @param serverName - * @return ServerLoad if serverName is known else null + * @return ServerMetrics if serverName is known else null */ - public ServerLoad getLoad(final ServerName serverName) { + public ServerMetrics getLoad(final ServerName serverName) { return this.onlineServers.get(serverName); } @@ -462,9 +463,9 @@ public class ServerManager { public double getAverageLoad() { int totalLoad = 0; int numServers = 0; - for (ServerLoad sl: this.onlineServers.values()) { - numServers++; - totalLoad += sl.getNumberOfRegions(); + for (ServerMetrics sl : this.onlineServers.values()) { + numServers++; + totalLoad += sl.getRegionMetrics().size(); } return numServers == 0 ? 0 : (double)totalLoad / (double)numServers; @@ -479,7 +480,7 @@ public class ServerManager { /** * @return Read-only map of servers to serverinfo */ - public Map getOnlineServers() { + public Map getOnlineServers() { // Presumption is that iterating the returned Map is OK. synchronized (this.onlineServers) { return Collections.unmodifiableMap(this.onlineServers); @@ -907,11 +908,11 @@ public class ServerManager { * @return A copy of the internal list of online servers matched by the predicator */ public List getOnlineServersListWithPredicator(List keys, - Predicate idleServerPredicator) { + Predicate idleServerPredicator) { List names = new ArrayList<>(); if (keys != null && idleServerPredicator != null) { keys.forEach(name -> { - ServerLoad load = onlineServers.get(name); + ServerMetrics load = onlineServers.get(name); if (load != null) { if (idleServerPredicator.test(load)) { names.add(name); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a8dd9aeb7fc..36f57f23c83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -1,4 +1,5 @@ - /* +/** + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +34,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.function.Predicate; import java.util.stream.Collectors; - import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -73,8 +73,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer { private static final List EMPTY_REGION_LIST = new ArrayList<>(0); - static final Predicate IDLE_SERVER_PREDICATOR - = load -> load.getNumberOfRegions() == 0; + static final Predicate IDLE_SERVER_PREDICATOR + = load -> load.getRegionMetrics().isEmpty(); protected RegionLocationFinder regionFinder; protected boolean useRegionFinder; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index a72478c3839..b65261029ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -31,9 +31,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; - import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper; @@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.util.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; @@ -274,10 +274,10 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements // Assign the region to the one with a lower load (both have the desired hdfs blocks) ServerName s; - ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); - ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); + ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); + ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); if (secondaryLoad != null && tertiaryLoad != null) { - if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) { + if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) { s = secondaryHost; } else { s = tertiaryHost; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 4c3167fe915..7e1dd4ddcbc 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -22,10 +22,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; - import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; /** @@ -44,7 +45,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; * *
    *
  1. Get all regions of a given table - *
  2. Get avg size S of each region (by total size of store files reported in RegionLoad) + *
2. Get avg size S of each region (by total size of store files reported in RegionMetrics) *
  4. Seek every single region one by one. If a region R0 is bigger than S * 2, it is * kindly requested to split. Thereon evaluate the next region R1 *
  5. Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge. @@ -204,12 +205,12 @@ public class SimpleRegionNormalizer implements RegionNormalizer { private long getRegionSize(RegionInfo hri) { ServerName sn = masterServices.getAssignmentManager().getRegionStates(). getRegionServerOfRegion(hri); - RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn). - getRegionsLoad().get(hri.getRegionName()); + RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn). + getRegionMetrics().get(hri.getRegionName()); if (regionLoad == null) { LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad"); return -1; } - return regionLoad.getStorefileSizeMB(); + return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE); } } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp index 9f95b763cca..7b7e227045d 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp @@ -28,7 +28,6 @@ import="java.util.stream.Collectors" import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.RSGroupTableAccessor" - import="org.apache.hadoop.hbase.ServerLoad" import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.client.Admin" @@ -42,6 +41,9 @@ import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.VersionInfo" import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%> +<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %> +<%@ page import="org.apache.hadoop.hbase.Size" %> +<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %> <% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); String rsGroupName = request.getParameter("name"); @@ -67,7 +69,7 @@ return 0; }); - Map onlineServers = Collections.emptyMap(); + Map onlineServers = Collections.emptyMap(); Map serverMaping = Collections.emptyMap(); if (master.getServerManager() != null) { onlineServers = master.getServerManager().getOnlineServers().entrySet().stream() @@ -141,7 +143,7 @@ for (Address server: rsGroupServers) { ServerName serverName = serverMaping.get(server); if (serverName != null) { - ServerLoad sl = onlineServers.get(server); + ServerMetrics sl = onlineServers.get(server); String version = master.getRegionServerVersion(serverName); if (!masterVersion.equals(version)) { inconsistentNodeNum ++; @@ -150,11 +152,11 @@ int numRegionsOnline = 0; long lastContact = 0; if (sl != null) { - requestsPerSecond = sl.getRequestsPerSecond(); - numRegionsOnline = sl.getNumberOfRegions(); - totalRegions += sl.getNumberOfRegions(); - totalRequests += sl.getNumberOfRequests(); - lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000; + requestsPerSecond = sl.getRequestCountPerSecond(); + numRegionsOnline = sl.getRegionMetrics().size(); + totalRegions += sl.getRegionMetrics().size(); + totalRequests += sl.getRequestCount(); + lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000; } long startcode = serverName.getStartcode(); int infoPort = master.getRegionServerInfoPort(serverName); @@ -201,18 +203,21 @@ <% for (Address server: rsGroupServers) { ServerName serverName = serverMaping.get(server); - ServerLoad sl = onlineServers.get(server); + ServerMetrics sl = onlineServers.get(server); if (sl != null && serverName != null) { + double 
memStoreSizeMB = sl.getRegionMetrics().values() + .stream().mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)) + .sum(); int infoPort = master.getRegionServerInfoPort(serverName); String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; %> <%= serverName.getServerName() %> - <%= TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB() + <%= TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE) * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <%= TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB() + <%= TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE) * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <%= TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB() + <%= TraditionalBinaryPrefix.long2String((long) memStoreSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> <% } else { %> @@ -236,16 +241,22 @@ <% for (Address server: rsGroupServers) { ServerName serverName = serverMaping.get(server); - ServerLoad sl = onlineServers.get(server); + ServerMetrics sl = onlineServers.get(server); if (sl != null && serverName != null) { int infoPort = master.getRegionServerInfoPort(serverName); + long readRequestCount = 0; + long writeRequestCount = 0; + for (RegionMetrics rm : sl.getRegionMetrics().values()) { + readRequestCount += rm.getReadRequestCount(); + writeRequestCount += rm.getWriteRequestCount(); + } String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; %> <%= serverName.getServerName() %> - <%= String.format("%.0f", sl.getRequestsPerSecond()) %> - <%= sl.getReadRequestsCount() %> - <%= sl.getWriteRequestsCount() %> + <%= String.format("%.0f", sl.getRequestCountPerSecond()) %> + <%= readRequestCount %> + <%= writeRequestCount %> <% } else { %> @@ -271,22 +282,36 @@ <% for (Address server: rsGroupServers) { ServerName serverName = serverMaping.get(server); - ServerLoad sl = onlineServers.get(server); + ServerMetrics sl = onlineServers.get(server); if (sl != null && serverName != null) { + long storeCount = 0; + long storeFileCount = 0; + double storeUncompressedSizeMB = 0; + double storeFileSizeMB = 0; + double totalStaticIndexSizeKB = 0; + double totalStaticBloomSizeKB = 0; + for (RegionMetrics rm : sl.getRegionMetrics().values()) { + storeCount += rm.getStoreCount(); + storeFileCount += rm.getStoreFileCount(); + storeUncompressedSizeMB += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storeFileSizeMB += rm.getStoreFileSize().get(Size.Unit.MEGABYTE); + totalStaticIndexSizeKB += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + totalStaticBloomSizeKB += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE); + } int infoPort = master.getRegionServerInfoPort(serverName); String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; %> <%= serverName.getServerName() %> - <%= sl.getStores() %> - <%= sl.getStorefiles() %> + <%= storeCount %> + <%= storeFileCount %> <%= TraditionalBinaryPrefix.long2String( - sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <%= TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB() + (long) storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> + <%= TraditionalBinaryPrefix.long2String((long) storeFileSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %> - <%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB() + <%= TraditionalBinaryPrefix.long2String((long) totalStaticIndexSizeKB * 
TraditionalBinaryPrefix.KILO.value, "B", 1) %> - <%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB() + <%= TraditionalBinaryPrefix.long2String((long) totalStaticBloomSizeKB * TraditionalBinaryPrefix.KILO.value, "B", 1) %> <% } else { %> @@ -314,21 +339,27 @@ <% for (Address server: rsGroupServers) { ServerName serverName = serverMaping.get(server); - ServerLoad sl = onlineServers.get(server); + ServerMetrics sl = onlineServers.get(server); if (sl != null && serverName != null) { + long totalCompactingCells = 0; + long currentCompactedCells = 0; + for (RegionMetrics rm : sl.getRegionMetrics().values()) { + totalCompactingCells += rm.getCompactingCellCount(); + currentCompactedCells += rm.getCompactedCellCount(); + } String percentDone = ""; - if (sl.getTotalCompactingKVs() > 0) { + if (totalCompactingCells > 0) { percentDone = String.format("%.2f", 100 * - ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%"; + ((float) currentCompactedCells / totalCompactingCells)) + "%"; } int infoPort = master.getRegionServerInfoPort(serverName); String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; %> <%= serverName.getServerName() %> - <%= sl.getTotalCompactingKVs() %> - <%= sl.getCurrentCompactedKVs() %> - <%= sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %> + <%= totalCompactingCells %> + <%= currentCompactedCells %> + <%= totalCompactingCells - currentCompactedCells %> <%= percentDone %> <% } else { %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 9252552ca35..e52f33a45c3 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -34,8 +34,6 @@ import="org.apache.hadoop.hbase.HColumnDescriptor" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.HRegionLocation" - import="org.apache.hadoop.hbase.RegionLoad" - import="org.apache.hadoop.hbase.ServerLoad" import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.TableNotFoundException" @@ -60,16 +58,20 @@ <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %> +<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %> +<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %> +<%@ page import="org.apache.hadoop.hbase.Size" %> +<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %> <%! /** * @return An empty region load stamped with the passed in regionInfo * region name. */ - private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) { - return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder(). - setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder(). - setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME). - setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build()); + private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) { + return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder(). + setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder(). + setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME). 
+ setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build()); } %> <% @@ -87,7 +89,6 @@ Table table; String tableHeader; boolean withReplica = false; - ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper()); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false); int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, @@ -216,18 +217,18 @@ if ( fqtn != null ) { float locality = 0.0f; if (metaLocation != null) { - ServerLoad sl = master.getServerManager().getLoad(metaLocation); + ServerMetrics sl = master.getServerManager().getLoad(metaLocation); // The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely. hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation); if (sl != null) { - Map map = sl.getRegionsLoad(); + Map map = sl.getRegionMetrics(); if (map.containsKey(meta.getRegionName())) { - RegionLoad load = map.get(meta.getRegionName()); - readReq = String.format("%,1d", load.getReadRequestsCount()); - writeReq = String.format("%,1d", load.getWriteRequestsCount()); - fileSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024); - fileCount = String.format("%,1d", load.getStorefiles()); - memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024); + RegionMetrics load = map.get(meta.getRegionName()); + readReq = String.format("%,1d", load.getReadRequestCount()); + writeReq = String.format("%,1d", load.getWriteRequestCount()); + fileSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE)); + fileCount = String.format("%,1d", load.getStoreFileCount()); + memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE)); locality = load.getDataLocality(); } } @@ -400,7 +401,7 @@ if ( fqtn != null ) { Map regDistribution = new TreeMap<>(); Map primaryRegDistribution = new TreeMap<>(); List regions = r.getAllRegionLocations(); - Map regionsToLoad = new LinkedHashMap<>(); + Map regionsToLoad = new LinkedHashMap<>(); Map regionsToServer = new LinkedHashMap<>(); for (HRegionLocation hriEntry : regions) { RegionInfo regionInfo = hriEntry.getRegionInfo(); @@ -408,28 +409,27 @@ if ( fqtn != null ) { regionsToServer.put(regionInfo, addr); if (addr != null) { - ServerLoad sl = master.getServerManager().getLoad(addr); + ServerMetrics sl = master.getServerManager().getLoad(addr); if (sl != null) { - Map map = sl.getRegionsLoad(); - RegionLoad regionload = map.get(regionInfo.getRegionName()); - regionsToLoad.put(regionInfo, regionload); - if(regionload != null) { - totalReadReq += regionload.getReadRequestsCount(); - totalWriteReq += regionload.getWriteRequestsCount(); - totalSize += regionload.getStorefileSizeMB(); - totalStoreFileCount += regionload.getStorefiles(); - totalMemSize += regionload.getMemStoreSizeMB(); - totalStoreFileSizeMB += regionload.getStorefileSizeMB(); + RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName()); + regionsToLoad.put(regionInfo, regionMetrics); + if(regionMetrics != null) { + totalReadReq += regionMetrics.getReadRequestCount(); + totalWriteReq += regionMetrics.getWriteRequestCount(); + totalSize += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE); + totalStoreFileCount += regionMetrics.getStoreFileCount(); + totalMemSize += regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE); + totalStoreFileSizeMB += 
regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE); } else { - RegionLoad load0 = getEmptyRegionLoad(regionInfo); + RegionMetrics load0 = getEmptyRegionMetrics(regionInfo); regionsToLoad.put(regionInfo, load0); } } else{ - RegionLoad load0 = getEmptyRegionLoad(regionInfo); + RegionMetrics load0 = getEmptyRegionMetrics(regionInfo); regionsToLoad.put(regionInfo, load0); } } else { - RegionLoad load0 = getEmptyRegionLoad(regionInfo); + RegionMetrics load0 = getEmptyRegionMetrics(regionInfo); regionsToLoad.put(regionInfo, load0); } } @@ -474,156 +474,92 @@ ShowDetailName&Start/End Key>() { - public int compare( - Map.Entry entry1, - Map.Entry entry2) { - if (entry1 == null || entry1.getValue() == null) { - return -1; - } else if (entry2 == null || entry2.getValue() == null) { - return 1; - } - int result = 0; - if (entry1.getValue().getReadRequestsCount() < entry2.getValue().getReadRequestsCount()) { - result = -1; - } else if (entry1.getValue().getReadRequestsCount() > entry2.getValue().getReadRequestsCount()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Long.compare(entry1.getValue().getReadRequestCount(), + entry2.getValue().getReadRequestCount()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } else if (sortKey.equals("writerequest")) { - Collections.sort(entryList, - new Comparator>() { - public int compare( - Map.Entry entry1, - Map.Entry entry2) { - if (entry1 == null || entry1.getValue() == null) { - return -1; - } else if (entry2 == null || entry2.getValue() == null) { - return 1; - } - int result = 0; - if (entry1.getValue().getWriteRequestsCount() < entry2.getValue() - .getWriteRequestsCount()) { - result = -1; - } else if (entry1.getValue().getWriteRequestsCount() > entry2.getValue() - .getWriteRequestsCount()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Long.compare(entry1.getValue().getWriteRequestCount(), + entry2.getValue().getWriteRequestCount()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } else if (sortKey.equals("size")) { - Collections.sort(entryList, - new Comparator>() { - public int compare( - Map.Entry entry1, - Map.Entry entry2) { - if (entry1 == null || entry1.getValue() == null) { - return -1; - } else if (entry2 == null || entry2.getValue() == null) { - return 1; - } - int result = 0; - if (entry1.getValue().getStorefileSizeMB() < entry2.getValue() - .getStorefileSizeMB()) { - result = -1; - } else if (entry1.getValue().getStorefileSizeMB() > entry2 - .getValue().getStorefileSizeMB()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Double.compare(entry1.getValue().getStoreFileSize().get(), + entry2.getValue().getStoreFileSize().get()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } else if 
(sortKey.equals("filecount")) { - Collections.sort(entryList, - new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { - public int compare( - Map.Entry<RegionInfo, RegionLoad> entry1, - Map.Entry<RegionInfo, RegionLoad> entry2) { - if (entry1 == null || entry1.getValue() == null) { - return -1; - } else if (entry2 == null || entry2.getValue() == null) { - return 1; - } - int result = 0; - if (entry1.getValue().getStorefiles() < entry2.getValue() - .getStorefiles()) { - result = -1; - } else if (entry1.getValue().getStorefiles() > entry2.getValue() - .getStorefiles()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Integer.compare(entry1.getValue().getStoreFileCount(), + entry2.getValue().getStoreFileCount()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } else if (sortKey.equals("memstore")) { - Collections.sort(entryList, - new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { - public int compare( - Map.Entry<RegionInfo, RegionLoad> entry1, - Map.Entry<RegionInfo, RegionLoad> entry2) { - if (entry1 == null || entry1.getValue()==null) { - return -1; - } else if (entry2 == null || entry2.getValue()==null) { - return 1; - } - int result = 0; - if (entry1.getValue().getMemStoreSizeMB() < entry2.getValue() - .getMemStoreSizeMB()) { - result = -1; - } else if (entry1.getValue().getMemStoreSizeMB() > entry2 - .getValue().getMemStoreSizeMB()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Double.compare(entry1.getValue().getMemStoreSize().get(), + entry2.getValue().getMemStoreSize().get()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } else if (sortKey.equals("locality")) { - Collections.sort(entryList, - new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { - public int compare( - Map.Entry<RegionInfo, RegionLoad> entry1, - Map.Entry<RegionInfo, RegionLoad> entry2) { - if (entry1 == null || entry1.getValue()==null) { - return -1; - } else if (entry2 == null || entry2.getValue()==null) { - return 1; - } - int result = 0; - if (entry1.getValue().getDataLocality() < entry2.getValue() - .getDataLocality()) { - result = -1; - } else if (entry1.getValue().getDataLocality() > entry2 - .getValue().getDataLocality()) { - result = 1; - } - if (reverseOrder) { - result = -1 * result; - } - return result; - } - }); + Collections.sort(entryList, (entry1, entry2) -> { + if (entry1 == null || entry1.getValue() == null) { + return -1; + } else if (entry2 == null || entry2.getValue() == null) { + return 1; + } + int result = Double.compare(entry1.getValue().getDataLocality(), + entry2.getValue().getDataLocality()); + if (reverseOrder) { + result = -1 * result; + } + return result; + }); } } numRegions = regions.size(); @@ -632,10 +568,10 @@ for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) { - RegionLoad regionLoad = Mockito.mock(RegionLoad.class); - when(regionLoad.getName()).thenReturn(region.getKey()); - when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue()); + RegionMetrics regionLoad = Mockito.mock(RegionMetrics.class); + when(regionLoad.getRegionName()).thenReturn(region.getKey()); + when(regionLoad.getStoreFileSize()) + .thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE)); // this is possibly broken with jdk9, unclear if false positive or not // suppress it 
for now, fix it when we get to running tests on 9 // see: http://errorprone.info/bugpattern/MockitoCast when((Object) masterServices.getServerManager().getLoad(sn). - getRegionsLoad().get(region.getKey())).thenReturn(regionLoad); + getRegionMetrics().get(region.getKey())).thenReturn(regionLoad); } try { when(masterRpcServices.isSplitOrMergeEnabled(any(),
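The diff is truncated above, but the visible test hunk already shows the stubbing pattern the new value types require; isolated, it looks like this (class and helper names illustrative, not part of the patch):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.Size;

// Sketch only -- the class name is illustrative, not part of the patch.
final class RegionMetricsMockSketch {
  // Size is a plain value type, so tests hand the mock a real Size instance
  // instead of stubbing a primitive MB count as they did with RegionLoad.
  static RegionMetrics withStoreFileSize(byte[] regionName, int sizeMB) {
    RegionMetrics rm = mock(RegionMetrics.class);
    when(rm.getRegionName()).thenReturn(regionName);
    when(rm.getStoreFileSize()).thenReturn(new Size(sizeMB, Size.Unit.MEGABYTE));
    return rm;
  }
}
```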