diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java new file mode 100644 index 00000000000..06a76049b47 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java @@ -0,0 +1,82 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Read rs group information from hbase:rsgroup. 
+ */
+@InterfaceAudience.Private
+public final class RSGroupTableAccessor {
+
+  //Assigned before user tables
+  private static final TableName RSGROUP_TABLE_NAME =
+      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup");
+  private static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m");
+  private static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
+
+  public static List<RSGroupInfo> getAllRSGroupInfo(Connection connection)
+      throws IOException {
+    try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) {
+      List<RSGroupInfo> rsGroupInfos = new ArrayList<>();
+      for (Result result : rsGroupTable.getScanner(new Scan())) {
+        RSGroupInfo rsGroupInfo = getRSGroupInfo(result);
+        if (rsGroupInfo != null) {
+          rsGroupInfos.add(rsGroupInfo);
+        }
+      }
+      return rsGroupInfos;
+    }
+  }
+
+  private static RSGroupInfo getRSGroupInfo(Result result) throws IOException {
+    byte[] rsGroupInfo = result.getValue(META_FAMILY_BYTES, META_QUALIFIER_BYTES);
+    if (rsGroupInfo == null) {
+      return null;
+    }
+    RSGroupProtos.RSGroupInfo proto =
+        RSGroupProtos.RSGroupInfo.parseFrom(rsGroupInfo);
+    return ProtobufUtil.toGroupInfo(proto);
+  }
+
+  public static RSGroupInfo getRSGroupInfo(Connection connection, byte[] rsGroupName)
+      throws IOException {
+    try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) {
+      Result result = rsGroupTable.get(new Get(rsGroupName));
+      return getRSGroupInfo(result);
+    }
+  }
+
+  private RSGroupTableAccessor() {
+  }
+}
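
For reference, a minimal caller-side sketch of how the new accessor can be used from hbase-client. The configuration setup and the group name "my_group" are illustrative placeholders, not part of this patch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RSGroupTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: list all rs groups, then look one up by name.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      for (RSGroupInfo group : RSGroupTableAccessor.getAllRSGroupInfo(connection)) {
        System.out.println(group.getName() + " servers=" + group.getServers());
      }
      RSGroupInfo one = RSGroupTableAccessor.getRSGroupInfo(connection, Bytes.toBytes("my_group"));
      if (one != null) { // null when the group row is absent from hbase:rsgroup
        System.out.println(one.getTables());
      }
    }
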
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 7b63cd6385a..973925434e9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
@@ -91,7 +92,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -255,10 +258,11 @@ public final class ProtobufUtil {
    * Return the Exception thrown by the remote server wrapped in
    * ServiceException as cause. RemoteException are left untouched.
    *
-   * @param se ServiceException that wraps IO exception thrown by the server
+   * @param e ServiceException that wraps IO exception thrown by the server
    * @return Exception wrapped in ServiceException.
    */
-  public static IOException getServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
+  public static IOException getServiceException(
+      org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {
     Throwable t = e.getCause();
     if (ExceptionUtil.isInterrupt(t)) {
       return ExceptionUtil.asInterrupt(t);
@@ -1817,4 +1821,15 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
+
+  public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
+    RSGroupInfo rsGroupInfo = new RSGroupInfo(proto.getName());
+    for (HBaseProtos.ServerName el : proto.getServersList()) {
+      rsGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort()));
+    }
+    for (TableProtos.TableName pTableName : proto.getTablesList()) {
+      rsGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
+    }
+    return rsGroupInfo;
+  }
 }
diff --git a/hbase-rsgroup/src/main/protobuf/RSGroup.proto b/hbase-protocol/src/main/protobuf/RSGroup.proto
similarity index 100%
rename from hbase-rsgroup/src/main/protobuf/RSGroup.proto
rename to hbase-protocol/src/main/protobuf/RSGroup.proto
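Moving RSGroup.proto into hbase-protocol is what lets hbase-client reference the generated RSGroupProtos classes used above. As a rough illustration of the new ProtobufUtil.toGroupInfo() conversion, assuming the standard builder methods generated from RSGroup.proto (the group name and server below are made-up values, not part of this patch):

    RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.newBuilder()
        .setName("example_group")
        .addServers(HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com")
            .setPort(16020)
            .build())
        .build();
    RSGroupInfo converted = ProtobufUtil.toGroupInfo(proto);
    // converted.getServers() now contains Address.fromParts("rs1.example.com", 16020)
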
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 292a6681839..d0342db333a 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -184,7 +184,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
           or re-run HBCK in repair mode.
-
+          <%if master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != null %>
+          <section>
+            <h2><a name="rsgroup">RSGroup</a></h2>
+            <& RSGroupListTmpl; master= master; serverManager= serverManager&>
+          </section>
+          </%if>
           <section>
             <h2><a name="regionservers">Region Servers</a></h2>
<& RegionServerListTmpl; master= master; servers = servers &> diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon new file mode 100644 index 00000000000..9f9831f7d5b --- /dev/null +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon @@ -0,0 +1,352 @@ +<%doc> +Copyright The Apache Software Foundation + +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +<%args> +HMaster master; +ServerManager serverManager; + + +<%import> + java.util.Collections; + java.util.List; + java.util.Map; + java.util.Set; + java.util.stream.Collectors; + org.apache.hadoop.hbase.master.HMaster; + org.apache.hadoop.hbase.ServerLoad; + org.apache.hadoop.hbase.RSGroupTableAccessor; + org.apache.hadoop.hbase.master.ServerManager; + org.apache.hadoop.hbase.net.Address; + org.apache.hadoop.hbase.rsgroup.RSGroupInfo; + org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; + +<%java> +List groups = RSGroupTableAccessor.getAllRSGroupInfo(master.getConnection()); + + +<%if (groups != null && groups.size() > 0)%> + +<%java> +RSGroupInfo [] rsGroupInfos = groups.toArray(new RSGroupInfo[groups.size()]); +Map collectServers = Collections.emptyMap(); +if (master.getServerManager() != null) { + collectServers = + master.getServerManager().getOnlineServers().entrySet().stream() + .collect(Collectors.toMap(p -> p.getKey().getAddress(), Map.Entry::getValue)); +} + + +
+ +
+
+ <& rsgroup_baseStats; rsGroupInfos = rsGroupInfos; collectServers= collectServers &> +
+
+ <& rsgroup_memoryStats; rsGroupInfos = rsGroupInfos; collectServers= collectServers &> +
+
+ <& rsgroup_requestStats; rsGroupInfos = rsGroupInfos; collectServers= collectServers &> +
+
+ <& rsgroup_storeStats; rsGroupInfos = rsGroupInfos; collectServers= collectServers &> +
+
+ <& rsgroup_compactStats; rsGroupInfos = rsGroupInfos; collectServers= collectServers &> +
+
+
+ + + +<%def rsgroup_baseStats> +<%args> + RSGroupInfo [] rsGroupInfos; + Map collectServers; + + + + + + + + + + + +<%java> + int totalOnlineServers = 0; + int totalDeadServers = 0; + int totalTables = 0; + int totalRequests = 0; + int totalRegions = 0; + for (RSGroupInfo rsGroupInfo: rsGroupInfos) { + String rsGroupName = rsGroupInfo.getName(); + int onlineServers = 0; + int deadServers = 0; + int tables = 0; + long requestsPerSecond = 0; + int numRegionsOnline = 0; + Set
servers = rsGroupInfo.getServers(); + for (Address server : servers) { + ServerLoad sl = collectServers.get(server); + if (sl != null) { + requestsPerSecond += sl.getNumberOfRequests(); + numRegionsOnline += sl.getNumberOfRegions(); + //rsgroup total + totalRegions += sl.getNumberOfRegions(); + totalRequests += sl.getNumberOfRequests(); + totalOnlineServers++; + onlineServers++; + } else { + totalDeadServers++; + deadServers++; + } + } + tables = rsGroupInfo.getTables().size(); + totalTables += tables; + double avgLoad = onlineServers == 0 ? 0 : + (double)numRegionsOnline / (double)onlineServers; + +
+ + + + + + + + +<%java> +} + + + + + + + + + +
RSGroup NameNum. Online ServersNum. Dead ServersNum. TablesRequests Per SecondNum. RegionsAverage Load
<& rsGroupLink; rsGroupName=rsGroupName; &><% onlineServers %><% deadServers %><% tables %><% requestsPerSecond %><% numRegionsOnline %><% avgLoad %>
Total:<% rsGroupInfos.length %><% totalOnlineServers %><% totalDeadServers %><% totalTables %><% totalRequests %><% totalRegions %><% master.getServerManager().getAverageLoad() %>
+ + +<%def rsgroup_memoryStats> +<%args> + RSGroupInfo [] rsGroupInfos; + Map collectServers; + + + + + + + + + +<%java> + for (RSGroupInfo rsGroupInfo: rsGroupInfos) { + String rsGroupName = rsGroupInfo.getName(); + long usedHeap = 0; + long maxHeap = 0; + long memstoreSize = 0; + for (Address server : rsGroupInfo.getServers()) { + ServerLoad sl = collectServers.get(server); + if (sl != null) { + usedHeap += sl.getUsedHeapMB(); + maxHeap += sl.getMaxHeapMB(); + memstoreSize += sl.getMemstoreSizeInMB(); + } + } + + + + + + + + +<%java> +} + +
RSGroup NameUsed HeapMax HeapMemstore Size
<& rsGroupLink; rsGroupName=rsGroupName; &><% TraditionalBinaryPrefix.long2String(usedHeap + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><% TraditionalBinaryPrefix.long2String(maxHeap + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><% TraditionalBinaryPrefix.long2String(memstoreSize + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
+ + +<%def rsgroup_requestStats> +<%args> + RSGroupInfo [] rsGroupInfos; + Map collectServers; + + + + + + + + +<%java> + for (RSGroupInfo rsGroupInfo: rsGroupInfos) { + String rsGroupName = rsGroupInfo.getName(); + long requestsPerSecond = 0; + long readRequests = 0; + long writeRequests = 0; + for (Address server : rsGroupInfo.getServers()) { + ServerLoad sl = collectServers.get(server); + if (sl != null) { + requestsPerSecond += sl.getNumberOfRequests(); + readRequests += sl.getReadRequestsCount(); + writeRequests += sl.getWriteRequestsCount(); + } + } + + + + + + + +<%java> +} + +
RSGroup NameRequest Per SecondRead Request CountWrite Request Count
<& rsGroupLink; rsGroupName=rsGroupName; &><% requestsPerSecond %><% readRequests %><% writeRequests %>
+ + + +<%def rsgroup_storeStats> +<%args> + RSGroupInfo [] rsGroupInfos; + Map collectServers; + + + + + + + + + + + +<%java> + for (RSGroupInfo rsGroupInfo: rsGroupInfos) { + String rsGroupName = rsGroupInfo.getName(); + int numStores = 0; + long numStorefiles = 0; + long uncompressedStorefileSize = 0; + long storefileSize = 0; + long indexSize = 0; + long bloomSize = 0; + int count = 0; + for (Address server : rsGroupInfo.getServers()) { + ServerLoad sl = collectServers.get(server); + if (sl != null) { + numStores += sl.getStores(); + numStorefiles += sl.getStorefiles(); + uncompressedStorefileSize += sl.getStoreUncompressedSizeMB(); + storefileSize += sl.getStorefileSizeInMB(); + indexSize += sl.getTotalStaticIndexSizeKB(); + bloomSize += sl.getTotalStaticBloomSizeKB(); + count++; + } + } + + + + + + + + + + +<%java> +} + +
RSGroup NameNum. StoresNum. StorefilesStorefile Size UncompressedStorefile SizeIndex SizeBloom Size
<& rsGroupLink; rsGroupName=rsGroupName; &><% numStores %><% numStorefiles %><% TraditionalBinaryPrefix.long2String( + uncompressedStorefileSize * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><% TraditionalBinaryPrefix.long2String( + storefileSize * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><% TraditionalBinaryPrefix.long2String( + indexSize * TraditionalBinaryPrefix.KILO.value, "B", 1) %><% TraditionalBinaryPrefix.long2String( + bloomSize * TraditionalBinaryPrefix.KILO.value, "B", 1) %>
+ + +<%def rsgroup_compactStats> +<%args> + RSGroupInfo [] rsGroupInfos; + Map collectServers; + + + + + + + + + +<%java> + for (RSGroupInfo rsGroupInfo: rsGroupInfos) { + String rsGroupName = rsGroupInfo.getName(); + int numStores = 0; + long totalCompactingKVs = 0; + long numCompactedKVs = 0; + long remainingKVs = 0; + long compactionProgress = 0; + for (Address server : rsGroupInfo.getServers()) { + ServerLoad sl = collectServers.get(server); + if (sl != null) { + totalCompactingKVs += sl.getTotalCompactingKVs(); + numCompactedKVs += sl.getCurrentCompactedKVs(); + } + } + remainingKVs = totalCompactingKVs - numCompactedKVs; + String percentDone = ""; + if (totalCompactingKVs > 0) { + percentDone = String.format("%.2f", 100 * + ((float) numCompactedKVs / totalCompactingKVs)) + "%"; + } + + + + + + + + +<%java> +} + +
RSGroup NameNum. Compacting KVsNum. Compacted KVsRemaining KVsCompaction Progress
<& rsGroupLink; rsGroupName=rsGroupName; &><% totalCompactingKVs %><% numCompactedKVs %><% remainingKVs %><% percentDone %>
+ + + +<%def rsGroupLink> + <%args> + String rsGroupName; + + ><% rsGroupName %> + \ No newline at end of file diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp new file mode 100644 index 00000000000..9f95b763cca --- /dev/null +++ b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp @@ -0,0 +1,441 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="java.util.ArrayList" + import="java.util.Collections" + import="java.util.Date" + import="java.util.List" + import="java.util.Map" + import="java.util.regex.Pattern" + import="java.util.stream.Stream" + import="java.util.stream.Collectors" + import="org.apache.hadoop.hbase.HTableDescriptor" + import="org.apache.hadoop.hbase.RSGroupTableAccessor" + import="org.apache.hadoop.hbase.ServerLoad" + import="org.apache.hadoop.hbase.ServerName" + import="org.apache.hadoop.hbase.TableName" + import="org.apache.hadoop.hbase.client.Admin" + import="org.apache.hadoop.hbase.client.RegionInfo" + import="org.apache.hadoop.hbase.client.TableState" + import="org.apache.hadoop.hbase.client.TableDescriptor" + import="org.apache.hadoop.hbase.master.HMaster" + import="org.apache.hadoop.hbase.master.RegionState" + import="org.apache.hadoop.hbase.net.Address" + import="org.apache.hadoop.hbase.rsgroup.RSGroupInfo" + import="org.apache.hadoop.hbase.util.Bytes" + import="org.apache.hadoop.hbase.util.VersionInfo" + import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%> +<% + HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); + String rsGroupName = request.getParameter("name"); + List
rsGroupServers = new ArrayList<>();
+  List<TableName> rsGroupTables = new ArrayList<>();
+  RSGroupInfo rsGroupInfo = null;
+  if (rsGroupName != null && !rsGroupName.isEmpty()) {
+    rsGroupInfo = RSGroupTableAccessor.getRSGroupInfo(
+        master.getConnection(), Bytes.toBytes(rsGroupName));
+    if (rsGroupInfo != null) {
+      rsGroupServers.addAll(rsGroupInfo.getServers());
+      rsGroupTables.addAll(rsGroupInfo.getTables());
+    }
+  }
+  Collections.sort(rsGroupServers);
+  rsGroupTables.sort((o1, o2) -> {
+    int compare = Bytes.compareTo(o1.getNamespace(), o2.getNamespace());
+    if (compare != 0)
+      return compare;
+    compare = Bytes.compareTo(o1.getQualifier(), o2.getQualifier());
+    if (compare != 0)
+      return compare;
+    return 0;
+  });
+
+  Map<Address, ServerLoad> onlineServers = Collections.emptyMap();
+  Map<Address, ServerName> serverMaping = Collections.emptyMap();
+  if (master.getServerManager() != null) {
+    onlineServers = master.getServerManager().getOnlineServers().entrySet().stream()
+        .collect(Collectors.toMap(p -> p.getKey().getAddress(), Map.Entry::getValue));
+    serverMaping =
+        master.getServerManager().getOnlineServers().entrySet().stream()
+        .collect(Collectors.toMap(p -> p.getKey().getAddress(), Map.Entry::getKey));
+  }
+  pageContext.setAttribute("pageTitle", "RSGroup: " + rsGroupName);
+%>
+
+
+ <% if (rsGroupName == null || rsGroupName.isEmpty() || rsGroupInfo == null) { %> +
+ +
+

Go Back, or wait for the redirect. + <% } else { %> +

+
+ +
+
+
+
+
+

Region Servers

+
+
+
+ <% if (rsGroupServers != null && rsGroupServers.size() > 0) { %> + + +
+
+ + + + + + + + + + <% int totalRegions = 0; + int totalRequests = 0; + int inconsistentNodeNum = 0; + String masterVersion = VersionInfo.getVersion(); + for (Address server: rsGroupServers) { + ServerName serverName = serverMaping.get(server); + if (serverName != null) { + ServerLoad sl = onlineServers.get(server); + String version = master.getRegionServerVersion(serverName); + if (!masterVersion.equals(version)) { + inconsistentNodeNum ++; + } + double requestsPerSecond = 0.0; + int numRegionsOnline = 0; + long lastContact = 0; + if (sl != null) { + requestsPerSecond = sl.getRequestsPerSecond(); + numRegionsOnline = sl.getNumberOfRegions(); + totalRegions += sl.getNumberOfRegions(); + totalRequests += sl.getNumberOfRequests(); + lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000; + } + long startcode = serverName.getStartcode(); + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";%> + + + + + + + + + <% } else { %> + + + + + + + + + <% } %> + <% } %> + + + + <%if (inconsistentNodeNum > 0) { %> + + <%} else { %> + + <%} %> + + + +
ServerNameStart timeLast contactVersionRequests Per SecondNum. Regions
<%= serverName.getServerName() %><%= new Date(startcode) %><%= lastContact %><%= version %><%= String.format("%.0f", requestsPerSecond) %><%= numRegionsOnline %>
<%= server %><%= "Dead" %>
Total:<%= rsGroupServers.size() %><%= inconsistentNodeNum %> nodes with inconsistent version<%= totalRequests %><%= totalRegions %>
+
+
+ + + + + + + + <% for (Address server: rsGroupServers) { + ServerName serverName = serverMaping.get(server); + ServerLoad sl = onlineServers.get(server); + if (sl != null && serverName != null) { + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; + %> + + + + + + + <% } else { %> + + + + + + + <% } + } %> +
ServerNameUsed HeapMax HeapMemstore Size
<%= serverName.getServerName() %><%= TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB() + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><%= TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB() + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><%= TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB() + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
<%= server %>
+
+
+ + + + + + + + <% for (Address server: rsGroupServers) { + ServerName serverName = serverMaping.get(server); + ServerLoad sl = onlineServers.get(server); + if (sl != null && serverName != null) { + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; + %> + + + + + + + <% } else { %> + + + + + + + <% } + } %> +
ServerNameRequest Per SecondRead Request CountWrite Request Count
<%= serverName.getServerName() %><%= String.format("%.0f", sl.getRequestsPerSecond()) %><%= sl.getReadRequestsCount() %><%= sl.getWriteRequestsCount() %>
<%= server %>
+
+
+ + + + + + + + + + + <% for (Address server: rsGroupServers) { + ServerName serverName = serverMaping.get(server); + ServerLoad sl = onlineServers.get(server); + if (sl != null && serverName != null) { + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; + %> + + + + + + + + + + <% } else { %> + + + + + + + + + + <% } + } %> +
ServerNameNum. StoresNum. StorefilesStorefile Size UncompressedStorefile SizeIndex SizeBloom Size
<%= serverName.getServerName() %><%= sl.getStores() %><%= sl.getStorefiles() %><%= TraditionalBinaryPrefix.long2String( + sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><%= TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB() + * TraditionalBinaryPrefix.MEGA.value, "B", 1) %><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB() + * TraditionalBinaryPrefix.KILO.value, "B", 1) %><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB() + * TraditionalBinaryPrefix.KILO.value, "B", 1) %>
<%= server %>
+
+
+ + + + + + + + + <% for (Address server: rsGroupServers) { + ServerName serverName = serverMaping.get(server); + ServerLoad sl = onlineServers.get(server); + if (sl != null && serverName != null) { + String percentDone = ""; + if (sl.getTotalCompactingKVs() > 0) { + percentDone = String.format("%.2f", 100 * + ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%"; + } + int infoPort = master.getRegionServerInfoPort(serverName); + String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; + %> + + + + + + + + <% } else { %> + + + + + + + + <% } + } %> +
ServerNameNum. Compacting KVsNum. Compacted KVsRemaining KVsCompaction Progress
<%= serverName.getServerName() %><%= sl.getTotalCompactingKVs() %><%= sl.getCurrentCompactedKVs() %><%= sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %><%= percentDone %>
<%= server %>
+
+
+ <% } else { %> +

No Region Servers

+ <% } %> +
+
+
+ +
+
+ +
+ + <% if (rsGroupTables != null && rsGroupTables.size() > 0) { + HTableDescriptor[] tables = null; + try (Admin admin = master.getConnection().getAdmin()) { + tables = master.isInitialized() ? admin.listTables((Pattern)null, true) : null; + } + Map tableDescriptors + = Stream.of(tables).collect(Collectors.toMap(TableDescriptor::getTableName, p -> p)); + %> + + + + + + + + + + + + + <% for(TableName tableName : rsGroupTables) { + HTableDescriptor htDesc = tableDescriptors.get(tableName); + if(htDesc == null) { + %> + + + + + + + + + + + + <% } else { %> + + + + <% TableState.State tableState = master.getTableStateManager().getTableState(tableName); + if(TableState.isInStates(tableState, + TableState.State.DISABLED, TableState.State.DISABLING)) { + %> + + <% } else { %> + + <% } %> + <% Map> tableRegions = + master.getAssignmentManager().getRegionStates().getRegionByStateOfTable(tableName); + int openRegionsCount = tableRegions.get(RegionState.State.OPEN).size(); + int offlineRegionsCount = tableRegions.get(RegionState.State.OFFLINE).size(); + int splitRegionsCount = tableRegions.get(RegionState.State.SPLIT).size(); + int failedRegionsCount = tableRegions.get(RegionState.State.FAILED_OPEN).size() + + tableRegions.get(RegionState.State.FAILED_CLOSE).size(); + int otherRegionsCount = 0; + for (List list: tableRegions.values()) { + otherRegionsCount += list.size(); + } + // now subtract known states + otherRegionsCount = otherRegionsCount - openRegionsCount + - failedRegionsCount - offlineRegionsCount + - splitRegionsCount; + %> + + + + + + + + <% } + } %> +

<%= rsGroupTables.size() %> table(s) in set.

+
NamespaceTableStatsOnline RegionsOffline RegionsFailed RegionsSplit RegionsOther RegionsDescription
<%= tableName.getNamespaceAsString() %><%= tableName.getQualifierAsString() %><%= "DELETED" %>
<%= tableName.getNamespaceAsString() %><%= tableName.getQualifierAsString() %><%= tableState.name() %><%= tableState.name() %><%= openRegionsCount %><%= offlineRegionsCount %><%= failedRegionsCount %><%= splitRegionsCount %><%= otherRegionsCount %><%= htDesc.toStringCustomizedValues() %>
+ <% } else { %> +

No Tables

+ <% } %> +
+<% } %> +
+
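
Both RSGroupListTmpl.jamon and rsgroup.jsp aggregate per-group numbers the same way: index the online servers by Address and treat group members with no ServerLoad as dead. A stand-alone sketch of that pattern, using a hypothetical helper class that is not part of the patch:

    import java.util.Map;
    import java.util.stream.Collectors;
    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.net.Address;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

    // Hypothetical helper mirroring the inline aggregation in the UI code above.
    final class RSGroupUiStats {
      static long totalRequests(HMaster master, RSGroupInfo group) {
        Map<Address, ServerLoad> byAddress = master.getServerManager().getOnlineServers()
            .entrySet().stream()
            .collect(Collectors.toMap(e -> e.getKey().getAddress(), Map.Entry::getValue));
        long total = 0;
        for (Address server : group.getServers()) {
          ServerLoad load = byAddress.get(server);
          if (load != null) {  // absent => the server is shown as dead in the UI
            total += load.getNumberOfRequests();
          }
        }
        return total;
      }
    }
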