HBASE-20093 Replace ServerLoad by ServerMetrics for ServerManager

Signed-off-by: tedyu <yuzhihong@gmail.com>
Author: Chia-Ping Tsai
Date:   2018-02-27 23:20:06 +08:00
parent ba063abd2f
commit 7f6e971c4c
14 changed files with 381 additions and 360 deletions

View File: TestClassLoading

@@ -22,8 +22,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import java.io.*;
-import java.util.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -34,8 +40,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -47,8 +53,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.ClassLoaderTestHelper;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -482,13 +490,13 @@ public class TestClassLoading {
    * @param tableName : given table.
    * @return subset of all servers.
    */
-  Map<ServerName, ServerLoad> serversForTable(String tableName) {
-    Map<ServerName, ServerLoad> serverLoadHashMap = new HashMap<>();
-    for(Map.Entry<ServerName,ServerLoad> server:
+  Map<ServerName, ServerMetrics> serversForTable(String tableName) {
+    Map<ServerName, ServerMetrics> serverLoadHashMap = new HashMap<>();
+    for(Map.Entry<ServerName, ServerMetrics> server:
         TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
         getOnlineServers().entrySet()) {
-      for( Map.Entry<byte[], RegionLoad> region:
-          server.getValue().getRegionsLoad().entrySet()) {
+      for(Map.Entry<byte[], RegionMetrics> region:
+          server.getValue().getRegionMetrics().entrySet()) {
         if (region.getValue().getNameAsString().equals(tableName)) {
           // this server hosts a region of tableName: add this server..
           serverLoadHashMap.put(server.getKey(),server.getValue());
@@ -501,8 +509,7 @@ public class TestClassLoading {
   }
   void assertAllRegionServers(String tableName) throws InterruptedException {
-    Map<ServerName, ServerLoad> servers;
-    String[] actualCoprocessors = null;
+    Map<ServerName, ServerMetrics> servers;
     boolean success = false;
     String[] expectedCoprocessors = regionServerSystemCoprocessors;
     if (tableName == null) {
@@ -513,8 +520,9 @@
     }
     for (int i = 0; i < 5; i++) {
       boolean any_failed = false;
-      for(Map.Entry<ServerName,ServerLoad> server: servers.entrySet()) {
-        actualCoprocessors = server.getValue().getRsCoprocessors();
+      for(Map.Entry<ServerName, ServerMetrics> server: servers.entrySet()) {
+        String[] actualCoprocessors =
+          server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]);
         if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
           LOG.debug("failed comparison: actual: " +
             Arrays.toString(actualCoprocessors) +
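
Note on the pattern above: ServerLoad#getRsCoprocessors() returned a String[], while ServerMetrics#getCoprocessorNames() returns a List<String>, so array-based callers convert explicitly. A minimal standalone sketch of that conversion (the demo class and sample names are hypothetical):

    import java.util.Arrays;
    import java.util.List;

    public class CoprocessorNamesDemo {
      public static void main(String[] args) {
        // Stand-in for ServerMetrics#getCoprocessorNames()
        List<String> coprocessorNames = Arrays.asList("MultiRowMutationEndpoint", "TestCP");
        // Same List<String> -> String[] conversion used in the test
        String[] actualCoprocessors = coprocessorNames.stream().toArray(String[]::new);
        System.out.println(Arrays.toString(actualCoprocessors));
      }
    }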

View File: TestRSGroupsOfflineMode

@@ -124,8 +124,8 @@ public class TestRSGroupsOfflineMode {
     LOG.info("Waiting for region unassignments on failover RS...");
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override public boolean evaluate() throws Exception {
-        return master.getServerManager().getLoad(failoverRS.getServerName())
-            .getRegionsLoad().size() > 0;
+        return !master.getServerManager().getLoad(failoverRS.getServerName())
+            .getRegionMetrics().isEmpty();
       }
     });
   }
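
The predicate keeps its old meaning: getRegionsLoad().size() > 0 and !getRegionMetrics().isEmpty() both test that the failover regionserver still hosts at least one region. A plain-Java sketch of the polling pattern (WaitUntilDemo and its waitFor helper are hypothetical stand-ins for TEST_UTIL.waitFor and Waiter.Predicate):

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Supplier;

    public class WaitUntilDemo {
      // Simplified stand-in for HBaseTestingUtility#waitFor with a Waiter.Predicate
      static void waitFor(long timeoutMs, Supplier<Boolean> predicate) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline && !predicate.get()) {
          Thread.sleep(100);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        // Stand-in for getLoad(serverName).getRegionMetrics()
        Map<String, Object> regionMetrics = Collections.singletonMap("region-1", new Object());
        waitFor(1000, () -> !regionMetrics.isEmpty());
        System.out.println("failover RS still hosts " + regionMetrics.size() + " region(s)");
      }
    }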

View File

@@ -30,7 +30,9 @@ ServerManager serverManager;
 java.util.Set;
 java.util.stream.Collectors;
 org.apache.hadoop.hbase.master.HMaster;
-org.apache.hadoop.hbase.ServerLoad;
+org.apache.hadoop.hbase.RegionMetrics;
+org.apache.hadoop.hbase.ServerMetrics;
+org.apache.hadoop.hbase.Size;
 org.apache.hadoop.hbase.RSGroupTableAccessor;
 org.apache.hadoop.hbase.master.ServerManager;
 org.apache.hadoop.hbase.net.Address;
@@ -45,7 +47,7 @@ List<RSGroupInfo> groups = RSGroupTableAccessor.getAllRSGroupInfo(master.getConn
 <%java>
 RSGroupInfo [] rsGroupInfos = groups.toArray(new RSGroupInfo[groups.size()]);
-Map<Address, ServerLoad> collectServers = Collections.emptyMap();
+Map<Address, ServerMetrics> collectServers = Collections.emptyMap();
 if (master.getServerManager() != null) {
   collectServers =
     master.getServerManager().getOnlineServers().entrySet().stream()
@@ -85,7 +87,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_baseStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -112,13 +114,13 @@ if (master.getServerManager() != null) {
 int numRegionsOnline = 0;
 Set<Address> servers = rsGroupInfo.getServers();
 for (Address server : servers) {
-  ServerLoad sl = collectServers.get(server);
+  ServerMetrics sl = collectServers.get(server);
   if (sl != null) {
-    requestsPerSecond += sl.getNumberOfRequests();
-    numRegionsOnline += sl.getNumberOfRegions();
+    requestsPerSecond += sl.getRequestCountPerSecond();
+    numRegionsOnline += sl.getRegionMetrics().size();
     //rsgroup total
-    totalRegions += sl.getNumberOfRegions();
-    totalRequests += sl.getNumberOfRequests();
+    totalRegions += sl.getRegionMetrics().size();
+    totalRequests += sl.getRequestCountPerSecond();
     totalOnlineServers++;
     onlineServers++;
   } else {
@@ -157,7 +159,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_memoryStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -174,11 +176,12 @@ if (master.getServerManager() != null) {
 long maxHeap = 0;
 long memstoreSize = 0;
 for (Address server : rsGroupInfo.getServers()) {
-  ServerLoad sl = collectServers.get(server);
+  ServerMetrics sl = collectServers.get(server);
   if (sl != null) {
-    usedHeap += sl.getUsedHeapMB();
-    maxHeap += sl.getMaxHeapMB();
-    memstoreSize += sl.getMemstoreSizeInMB();
+    usedHeap += (long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE);
+    maxHeap += (long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE);
+    memstoreSize += (long) sl.getRegionMetrics().values().stream().mapToDouble(
+      rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)).sum();
   }
 }
 </%java>
@@ -201,7 +204,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_requestStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -217,11 +220,13 @@ if (master.getServerManager() != null) {
 long readRequests = 0;
 long writeRequests = 0;
 for (Address server : rsGroupInfo.getServers()) {
-  ServerLoad sl = collectServers.get(server);
+  ServerMetrics sl = collectServers.get(server);
   if (sl != null) {
-    requestsPerSecond += sl.getNumberOfRequests();
-    readRequests += sl.getReadRequestsCount();
-    writeRequests += sl.getWriteRequestsCount();
+    for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+      readRequests += rm.getReadRequestCount();
+      writeRequests += rm.getWriteRequestCount();
+    }
+    requestsPerSecond += sl.getRequestCountPerSecond();
   }
 }
 </%java>
@@ -241,7 +246,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_storeStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -264,14 +269,16 @@ if (master.getServerManager() != null) {
 long bloomSize = 0;
 int count = 0;
 for (Address server : rsGroupInfo.getServers()) {
-  ServerLoad sl = collectServers.get(server);
+  ServerMetrics sl = collectServers.get(server);
   if (sl != null) {
-    numStores += sl.getStores();
-    numStorefiles += sl.getStorefiles();
-    uncompressedStorefileSize += sl.getStoreUncompressedSizeMB();
-    storefileSize += sl.getStorefileSizeInMB();
-    indexSize += sl.getTotalStaticIndexSizeKB();
-    bloomSize += sl.getTotalStaticBloomSizeKB();
+    for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+      numStores += rm.getStoreCount();
+      numStorefiles += rm.getStoreFileCount();
+      uncompressedStorefileSize += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+      storefileSize += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
+      indexSize += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+      bloomSize += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+    }
     count++;
   }
 }
@@ -298,7 +305,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_compactStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -312,28 +319,30 @@ if (master.getServerManager() != null) {
 for (RSGroupInfo rsGroupInfo: rsGroupInfos) {
   String rsGroupName = rsGroupInfo.getName();
   int numStores = 0;
-  long totalCompactingKVs = 0;
-  long numCompactedKVs = 0;
+  long totalCompactingCells = 0;
+  long totalCompactedCells = 0;
   long remainingKVs = 0;
   long compactionProgress = 0;
   for (Address server : rsGroupInfo.getServers()) {
-    ServerLoad sl = collectServers.get(server);
+    ServerMetrics sl = collectServers.get(server);
     if (sl != null) {
-      totalCompactingKVs += sl.getTotalCompactingKVs();
-      numCompactedKVs += sl.getCurrentCompactedKVs();
+      for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+        totalCompactingCells += rl.getCompactingCellCount();
+        totalCompactedCells += rl.getCompactedCellCount();
+      }
     }
   }
-  remainingKVs = totalCompactingKVs - numCompactedKVs;
+  remainingKVs = totalCompactingCells - totalCompactedCells;
   String percentDone = "";
-  if (totalCompactingKVs > 0) {
+  if (totalCompactingCells > 0) {
     percentDone = String.format("%.2f", 100 *
-      ((float) numCompactedKVs / totalCompactingKVs)) + "%";
+      ((float) totalCompactedCells / totalCompactingCells)) + "%";
   }
 </%java>
 <tr>
   <td><& rsGroupLink; rsGroupName=rsGroupName; &></td>
-  <td><% totalCompactingKVs %></td>
-  <td><% numCompactedKVs %></td>
+  <td><% totalCompactingCells %></td>
+  <td><% totalCompactedCells %></td>
   <td><% remainingKVs %></td>
   <td><% percentDone %></td>
 </tr>
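
The recurring change in this template: ServerLoad exposed server-wide totals directly (getNumberOfRegions(), getMemstoreSizeInMB(), getReadRequestsCount(), ...), whereas ServerMetrics only carries per-region RegionMetrics, so every server total is now a sum over getRegionMetrics(). A sketch of that aggregation against the hbase-client 2.x API (the ServerMetricsAggregates class itself is hypothetical, not part of this commit):

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    public final class ServerMetricsAggregates {
      private ServerMetricsAggregates() {}

      /** Sum of memstore sizes across all regions on the server, in MB. */
      public static long memstoreSizeMB(ServerMetrics sm) {
        return (long) sm.getRegionMetrics().values().stream()
            .mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
            .sum();
      }

      /** Cumulative read requests across all regions on the server. */
      public static long readRequestCount(ServerMetrics sm) {
        long total = 0;
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          total += rm.getReadRequestCount();
        }
        return total;
      }
    }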

View File

@@ -26,8 +26,10 @@ HMaster master;
 <%import>
 java.util.*;
 org.apache.hadoop.hbase.master.HMaster;
-org.apache.hadoop.hbase.ServerLoad;
+org.apache.hadoop.hbase.RegionMetrics;
+org.apache.hadoop.hbase.ServerMetrics;
 org.apache.hadoop.hbase.ServerName;
+org.apache.hadoop.hbase.Size;
 org.apache.hadoop.hbase.util.VersionInfo;
 org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 </%import>
@@ -84,12 +86,12 @@ Arrays.sort(serverNames);
 </tr>
 <%java>
 int totalRegions = 0;
-int totalRequests = 0;
+int totalRequestsPerSecond = 0;
 int inconsistentNodeNum = 0;
 String masterVersion = VersionInfo.getVersion();
 for (ServerName serverName: serverNames) {
-  ServerLoad sl = master.getServerManager().getLoad(serverName);
+  ServerMetrics sl = master.getServerManager().getLoad(serverName);
   String version = master.getRegionServerVersion(serverName);
   if (!masterVersion.equals(version)) {
     inconsistentNodeNum ++;
@@ -100,12 +102,11 @@ Arrays.sort(serverNames);
 long lastContact = 0;
 if (sl != null) {
-  requestsPerSecond = sl.getRequestsPerSecond();
-  numRegionsOnline = sl.getNumberOfRegions();
-  totalRegions += sl.getNumberOfRegions();
-  // Is this correct? Adding a rate to a measure.
-  totalRequests += sl.getNumberOfRequests();
-  lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000;
+  requestsPerSecond = sl.getRequestCountPerSecond();
+  numRegionsOnline = sl.getRegionMetrics().size();
+  totalRegions += sl.getRegionMetrics().size();
+  totalRequestsPerSecond += sl.getRequestCountPerSecond();
+  lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
 }
 long startcode = serverName.getStartcode();
 </%java>
@@ -128,7 +129,7 @@ Arrays.sort(serverNames);
 <%else>
   <td></td>
 </%if>
-<td><% totalRequests %></td>
+<td><% totalRequestsPerSecond %></td>
 <td><% totalRegions %></td>
 </tr>
 </table>
@@ -149,16 +150,20 @@ Arrays.sort(serverNames);
 <%java>
 for (ServerName serverName: serverNames) {
-  ServerLoad sl = master.getServerManager().getLoad(serverName);
+  ServerMetrics sl = master.getServerManager().getLoad(serverName);
   if (sl != null) {
+    long memStoreSizeMB = 0;
+    for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+      memStoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
+    }
 </%java>
 <tr>
   <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+  <td><% TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
     * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+  <td><% TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
    * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getMemStoreSizeMB()
+  <td><% TraditionalBinaryPrefix.long2String(memStoreSizeMB
    * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
 </tr>
@@ -189,15 +194,23 @@ for (ServerName serverName: serverNames) {
 <%java>
 for (ServerName serverName: serverNames) {
-  ServerLoad sl = master.getServerManager().getLoad(serverName);
+  ServerMetrics sl = master.getServerManager().getLoad(serverName);
   if (sl != null) {
+    long readRequestCount = 0;
+    long writeRequestCount = 0;
+    long filteredReadRequestCount = 0;
+    for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+      readRequestCount += rl.getReadRequestCount();
+      writeRequestCount += rl.getWriteRequestCount();
+      filteredReadRequestCount += rl.getFilteredReadRequestCount();
+    }
 </%java>
 <tr>
   <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-  <td><% String.format("%.0f", sl.getRequestsPerSecond()) %></td>
-  <td><% sl.getReadRequestsCount() %></td>
-  <td><% sl.getFilteredReadRequestsCount() %></td>
-  <td><% sl.getWriteRequestsCount() %></td>
+  <td><% sl.getRequestCountPerSecond() %></td>
+  <td><% readRequestCount %></td>
+  <td><% filteredReadRequestCount %></td>
+  <td><% writeRequestCount %></td>
 </tr>
 <%java>
 } else {
@@ -228,20 +241,34 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
-  ServerLoad sl = master.getServerManager().getLoad(serverName);
+  ServerMetrics sl = master.getServerManager().getLoad(serverName);
   if (sl != null) {
+    long storeCount = 0;
+    long storeFileCount = 0;
+    long storeUncompressedSizeMB = 0;
+    long storeFileSizeMB = 0;
+    long totalStaticIndexSizeKB = 0;
+    long totalStaticBloomSizeKB = 0;
+    for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+      storeCount += rl.getStoreCount();
+      storeFileCount += rl.getStoreFileCount();
+      storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+      storeFileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
+      totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+      totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+    }
 </%java>
 <tr>
   <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-  <td><% sl.getStores() %></td>
-  <td><% sl.getStorefiles() %></td>
+  <td><% storeCount %></td>
+  <td><% storeFileCount %></td>
   <td><% TraditionalBinaryPrefix.long2String(
-    sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
+    storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
+  <td><% TraditionalBinaryPrefix.long2String(storeFileSizeMB
    * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+  <td><% TraditionalBinaryPrefix.long2String(totalStaticIndexSizeKB
    * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
-  <td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+  <td><% TraditionalBinaryPrefix.long2String(totalStaticBloomSizeKB
    * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
 </tr>
 <%java>
@@ -270,19 +297,25 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
-  ServerLoad sl = master.getServerManager().getLoad(serverName);
+  ServerMetrics sl = master.getServerManager().getLoad(serverName);
   if (sl != null) {
+    long totalCompactingCells = 0;
+    long totalCompactedCells = 0;
+    for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+      totalCompactingCells += rl.getCompactingCellCount();
+      totalCompactedCells += rl.getCompactedCellCount();
+    }
     String percentDone = "";
-    if (sl.getTotalCompactingKVs() > 0) {
+    if (totalCompactingCells > 0) {
       percentDone = String.format("%.2f", 100 *
-        ((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%";
+        ((float) totalCompactedCells / totalCompactingCells)) + "%";
     }
 </%java>
 <tr>
   <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-  <td><% sl.getTotalCompactingKVs() %></td>
-  <td><% sl.getCurrentCompactedKVs() %></td>
-  <td><% sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td>
+  <td><% totalCompactingCells %></td>
+  <td><% totalCompactedCells %></td>
+  <td><% totalCompactingCells - totalCompactedCells %></td>
   <td><% percentDone %></td>
 </tr>
 <%java>
@@ -300,7 +333,7 @@ if (sl.getTotalCompactingKVs() > 0) {
 <%def serverNameLink>
 <%args>
 ServerName serverName;
-ServerLoad serverLoad;
+ServerMetrics serverLoad;
 </%args>
 <%java>
 int infoPort = master.getRegionServerInfoPort(serverName);
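
Both status pages derive compaction progress the same way. Worked arithmetic below (plain Java; the sample numbers are made up):

    public class CompactionProgressDemo {
      public static void main(String[] args) {
        long totalCompactingCells = 1_000_000L;  // sum of getCompactingCellCount()
        long totalCompactedCells = 250_000L;     // sum of getCompactedCellCount()
        long remainingCells = totalCompactingCells - totalCompactedCells;
        String percentDone = "";
        if (totalCompactingCells > 0) {
          percentDone = String.format("%.2f",
              100 * ((float) totalCompactedCells / totalCompactingCells)) + "%";
        }
        // Prints: 750000 cells remaining, 25.00% done
        System.out.println(remainingCells + " cells remaining, " + percentDone + " done");
      }
    }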

View File: FavoredNodeLoadBalancer

@@ -28,11 +28,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -132,12 +132,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
       }
       //the region is currently on none of the favored nodes
       //get it on one of them if possible
-      ServerLoad l1 = super.services.getServerManager().getLoad(
+      ServerMetrics l1 = super.services.getServerManager().getLoad(
         serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
-      ServerLoad l2 = super.services.getServerManager().getLoad(
+      ServerMetrics l2 = super.services.getServerManager().getLoad(
         serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
       if (l1 != null && l2 != null) {
-        if (l1.getLoad() > l2.getLoad()) {
+        if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) {
           destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
         } else {
           destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
@@ -296,9 +296,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
       // assign the region to the one with a lower load
       // (both have the desired hdfs blocks)
       ServerName s;
-      ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-      ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
-      if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+      ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+      ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+      if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
         s = secondaryHost;
       } else {
         s = tertiaryHost;
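
ServerLoad#getLoad() was defined as the number of regions on the server, so comparing getRegionMetrics().size() preserves the "pick the less loaded favored node" tie-break exactly. A sketch of the measure as a reusable comparator (hypothetical class, compiled against hbase-client 2.x):

    import java.util.Comparator;
    import org.apache.hadoop.hbase.ServerMetrics;

    public final class RegionCountLoad {
      private RegionCountLoad() {}

      // Orders servers by how many regions they host, the same measure the
      // old ServerLoad#getLoad() returned.
      public static final Comparator<ServerMetrics> BY_REGION_COUNT =
          Comparator.comparingInt(sm -> sm.getRegionMetrics().size());
    }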

View File: MasterDumpServlet

@@ -24,13 +24,10 @@ import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
 import java.util.Map;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
@@ -39,6 +36,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.RSDumpServlet;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class MasterDumpServlet extends StateDumpServlet {
@@ -132,8 +130,8 @@ public class MasterDumpServlet extends StateDumpServlet {
       return;
     }
-    Map<ServerName, ServerLoad> servers = sm.getOnlineServers();
-    for (Map.Entry<ServerName, ServerLoad> e : servers.entrySet()) {
+    Map<ServerName, ServerMetrics> servers = sm.getOnlineServers();
+    for (Map.Entry<ServerName, ServerMetrics> e : servers.entrySet()) {
       out.println(e.getKey() + ": " + e.getValue());
     }
   }

View File: MasterRpcServices

@@ -37,7 +37,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
@@ -102,6 +103,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -451,16 +453,16 @@ public class MasterRpcServices extends RSRpcServices
       master.checkServiceStarted();
       ClusterStatusProtos.ServerLoad sl = request.getLoad();
       ServerName serverName = ProtobufUtil.toServerName(request.getServer());
-      ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
-      ServerLoad newLoad = new ServerLoad(serverName, sl);
+      ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
+      ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, sl);
       master.getServerManager().regionServerReport(serverName, newLoad);
       int version = VersionInfoUtil.getCurrentClientVersionNumber();
       master.getAssignmentManager().reportOnlineRegions(serverName,
-        version, newLoad.getRegionsLoad().keySet());
+        version, newLoad.getRegionMetrics().keySet());
       if (sl != null && master.metricsMaster != null) {
         // Up our metrics.
         master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
-          - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
+          - (oldLoad != null ? oldLoad.getRequestCount() : 0));
       }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
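
A worked sketch of the metrics update above (plain Java, made-up numbers): the master's requests metric is incremented by how far the regionserver's cumulative request counter advanced since its previous report; getRequestCount() on the new ServerMetrics corresponds to the old getTotalNumberOfRequests().

    public class RequestDeltaDemo {
      public static void main(String[] args) {
        Long previousCumulative = 10_000L; // oldLoad.getRequestCount(); null if first report
        long reportedCumulative = 10_450L; // sl.getTotalNumberOfRequests() from this report
        long delta = reportedCumulative - (previousCumulative != null ? previousCumulative : 0);
        System.out.println("metricsMaster.incrementRequests(" + delta + ")"); // 450
      }
    }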

View File: ServerManager

@@ -37,13 +37,12 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Predicate;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
@@ -62,8 +61,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
@@ -124,7 +125,8 @@ public class ServerManager {
     storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
   /** Map of registered servers to their current load */
-  private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers = new ConcurrentSkipListMap<>();
+  private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
+      new ConcurrentSkipListMap<>();
 
   /**
    * Map of admin interfaces per registered regionserver; these interfaces we use to control
@@ -240,7 +242,7 @@ public class ServerManager {
       request.getServerStartCode());
     checkClockSkew(sn, request.getServerCurrentTime());
     checkIsDead(sn, "STARTUP");
-    if (!checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) {
+    if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn))) {
       LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
         + " could not record the server: " + sn);
     }
@@ -252,12 +254,11 @@ public class ServerManager {
    * @param sn
    * @param hsl
    */
-  private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
-    Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
-    for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
+  private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) {
+    for (Entry<byte[], RegionMetrics> entry : hsl.getRegionMetrics().entrySet()) {
       byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey()));
       Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
-      long l = entry.getValue().getCompleteSequenceId();
+      long l = entry.getValue().getCompletedSequenceId();
       // Don't let smaller sequence ids override greater sequence ids.
       if (LOG.isTraceEnabled()) {
         LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
@@ -273,10 +274,10 @@ public class ServerManager {
       ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
         computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName,
           () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
-      for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
-        byte[] family = storeSeqId.getFamilyName().toByteArray();
+      for (Entry<byte[], Long> storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) {
+        byte[] family = storeSeqId.getKey();
         existingValue = storeFlushedSequenceId.get(family);
-        l = storeSeqId.getSequenceId();
+        l = storeSeqId.getValue();
         if (LOG.isTraceEnabled()) {
           LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
             ", existingValue=" + existingValue + ", completeSequenceId=" + l);
@@ -291,7 +292,7 @@ public class ServerManager {
   @VisibleForTesting
   public void regionServerReport(ServerName sn,
-      ServerLoad sl) throws YouAreDeadException {
+      ServerMetrics sl) throws YouAreDeadException {
     checkIsDead(sn, "REPORT");
     if (null == this.onlineServers.replace(sn, sl)) {
       // Already have this host+port combo and its just different start code?
@@ -316,7 +317,7 @@ public class ServerManager {
    * @param sl the server load on the server
    * @return true if the server is recorded, otherwise, false
    */
-  boolean checkAndRecordNewServer(final ServerName serverName, final ServerLoad sl) {
+  boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) {
     ServerName existingServer = null;
     synchronized (this.onlineServers) {
       existingServer = findServerWithSameHostnamePortWithLock(serverName);
@@ -423,7 +424,7 @@ public class ServerManager {
    * @param serverName The remote servers name.
    */
   @VisibleForTesting
-  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
+  void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) {
     LOG.info("Registering regionserver=" + serverName);
     this.onlineServers.put(serverName, sl);
     this.rsAdmins.remove(serverName);
@@ -447,9 +448,9 @@ public class ServerManager {
   /**
    * @param serverName
-   * @return ServerLoad if serverName is known else null
+   * @return ServerMetrics if serverName is known else null
    */
-  public ServerLoad getLoad(final ServerName serverName) {
+  public ServerMetrics getLoad(final ServerName serverName) {
     return this.onlineServers.get(serverName);
   }
@@ -462,9 +463,9 @@ public class ServerManager {
   public double getAverageLoad() {
     int totalLoad = 0;
     int numServers = 0;
-    for (ServerLoad sl: this.onlineServers.values()) {
+    for (ServerMetrics sl : this.onlineServers.values()) {
       numServers++;
-      totalLoad += sl.getNumberOfRegions();
+      totalLoad += sl.getRegionMetrics().size();
     }
     return numServers == 0 ? 0 :
       (double)totalLoad / (double)numServers;
@@ -479,7 +480,7 @@ public class ServerManager {
   /**
    * @return Read-only map of servers to serverinfo
    */
-  public Map<ServerName, ServerLoad> getOnlineServers() {
+  public Map<ServerName, ServerMetrics> getOnlineServers() {
     // Presumption is that iterating the returned Map is OK.
     synchronized (this.onlineServers) {
       return Collections.unmodifiableMap(this.onlineServers);
@@ -907,11 +908,11 @@ public class ServerManager {
    * @return A copy of the internal list of online servers matched by the predicator
    */
   public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> keys,
-      Predicate<ServerLoad> idleServerPredicator) {
+      Predicate<ServerMetrics> idleServerPredicator) {
     List<ServerName> names = new ArrayList<>();
     if (keys != null && idleServerPredicator != null) {
       keys.forEach(name -> {
-        ServerLoad load = onlineServers.get(name);
+        ServerMetrics load = onlineServers.get(name);
         if (load != null) {
           if (idleServerPredicator.test(load)) {
             names.add(name);
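
A simplified sketch of the rule updateLastFlushedSequenceIds enforces (hypothetical class, String keys instead of byte[] encoded region names): each regionserver report merges per-region flushed sequence ids with max-wins semantics, so a stale, smaller sequence id can never overwrite a newer one.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class FlushedSeqIdMergeDemo {
      static final ConcurrentMap<String, Long> flushedSeqIdByRegion = new ConcurrentHashMap<>();

      static void record(String encodedRegionName, long reportedSeqId) {
        // "Don't let smaller sequence ids override greater sequence ids."
        flushedSeqIdByRegion.merge(encodedRegionName, reportedSeqId, Math::max);
      }

      public static void main(String[] args) {
        record("abc123", 42L);
        record("abc123", 40L); // stale report, ignored
        System.out.println(flushedSeqIdByRegion.get("abc123")); // 42
      }
    }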

View File: BaseLoadBalancer

@@ -1,4 +1,5 @@
-/*
+/**
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -33,7 +34,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -73,8 +73,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   private static final List<RegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
 
-  static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
-    = load -> load.getNumberOfRegions() == 0;
+  static final Predicate<ServerMetrics> IDLE_SERVER_PREDICATOR
+    = load -> load.getRegionMetrics().isEmpty();
 
   protected RegionLocationFinder regionFinder;
   protected boolean useRegionFinder;
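
The idle test is equivalent before and after: zero regions means an empty region-metrics map. A sketch of how such a predicate filters candidate servers (hypothetical class; ServerManager#getOnlineServersListWithPredicator in this commit does the same walk over its online-server map):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Predicate;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;

    public final class IdleServerFilter {
      private IdleServerFilter() {}

      static final Predicate<ServerMetrics> IDLE =
          load -> load.getRegionMetrics().isEmpty();

      // Collect the names of online servers currently hosting no regions.
      public static List<ServerName> idleServers(Map<ServerName, ServerMetrics> onlineServers) {
        List<ServerName> names = new ArrayList<>();
        onlineServers.forEach((name, load) -> {
          if (IDLE.test(load)) {
            names.add(name);
          }
        });
        return names;
      }
    }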

View File: FavoredStochasticBalancer

@@ -31,9 +31,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.util.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -274,10 +274,10 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
       // Assign the region to the one with a lower load (both have the desired hdfs blocks)
       ServerName s;
-      ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-      ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+      ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+      ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
       if (secondaryLoad != null && tertiaryLoad != null) {
-        if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+        if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
           s = secondaryHost;
         } else {
           s = tertiaryHost;

View File: SimpleRegionNormalizer

@@ -22,10 +22,10 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 
 /**
@@ -44,7 +45,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
  *
  * <ol>
  * <li> Get all regions of a given table
- * <li> Get avg size S of each region (by total size of store files reported in RegionLoad)
+ * <li> Get avg size S of each region (by total size of store files reported in RegionMetrics)
  * <li> Seek every single region one by one. If a region R0 is bigger than S * 2, it is
  *   kindly requested to split. Thereon evaluate the next region R1
  * <li> Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.
@@ -204,12 +205,12 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
   private long getRegionSize(RegionInfo hri) {
     ServerName sn = masterServices.getAssignmentManager().getRegionStates().
       getRegionServerOfRegion(hri);
-    RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn).
-      getRegionsLoad().get(hri.getRegionName());
+    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).
+      getRegionMetrics().get(hri.getRegionName());
     if (regionLoad == null) {
       LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");
       return -1;
     }
-    return regionLoad.getStorefileSizeMB();
+    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
   }
 }
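
Size is the new unit-safe carrier for all of the *MB/*KB getters that ServerLoad and RegionLoad used to expose; Size#get(Size.Unit) returns a double in the requested unit. A sketch of the conversion used by getRegionSize above (hypothetical helper class, compiled against hbase-client 2.x):

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.Size;

    public final class RegionSizeMB {
      private RegionSizeMB() {}

      // The old integer RegionLoad#getStorefileSizeMB() becomes a Size value,
      // converted to megabytes and cast back to long.
      public static long storeFileSizeMB(RegionMetrics rm) {
        return (long) rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
      }
    }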

View File

@ -28,7 +28,6 @@
import="java.util.stream.Collectors" import="java.util.stream.Collectors"
import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.HTableDescriptor"
import="org.apache.hadoop.hbase.RSGroupTableAccessor" import="org.apache.hadoop.hbase.RSGroupTableAccessor"
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.client.Admin" import="org.apache.hadoop.hbase.client.Admin"
@ -42,6 +41,9 @@
import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.VersionInfo" import="org.apache.hadoop.hbase.util.VersionInfo"
import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%> import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%>
<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
<%@ page import="org.apache.hadoop.hbase.Size" %>
<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
<% <%
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
String rsGroupName = request.getParameter("name"); String rsGroupName = request.getParameter("name");
@ -67,7 +69,7 @@
return 0; return 0;
}); });
Map<Address, ServerLoad> onlineServers = Collections.emptyMap(); Map<Address, ServerMetrics> onlineServers = Collections.emptyMap();
Map<Address, ServerName> serverMaping = Collections.emptyMap(); Map<Address, ServerName> serverMaping = Collections.emptyMap();
if (master.getServerManager() != null) { if (master.getServerManager() != null) {
onlineServers = master.getServerManager().getOnlineServers().entrySet().stream() onlineServers = master.getServerManager().getOnlineServers().entrySet().stream()
@ -141,7 +143,7 @@
for (Address server: rsGroupServers) { for (Address server: rsGroupServers) {
ServerName serverName = serverMaping.get(server); ServerName serverName = serverMaping.get(server);
if (serverName != null) { if (serverName != null) {
ServerLoad sl = onlineServers.get(server); ServerMetrics sl = onlineServers.get(server);
String version = master.getRegionServerVersion(serverName); String version = master.getRegionServerVersion(serverName);
if (!masterVersion.equals(version)) { if (!masterVersion.equals(version)) {
inconsistentNodeNum ++; inconsistentNodeNum ++;
@ -150,11 +152,11 @@
int numRegionsOnline = 0; int numRegionsOnline = 0;
long lastContact = 0; long lastContact = 0;
if (sl != null) { if (sl != null) {
requestsPerSecond = sl.getRequestsPerSecond(); requestsPerSecond = sl.getRequestCountPerSecond();
numRegionsOnline = sl.getNumberOfRegions(); numRegionsOnline = sl.getRegionMetrics().size();
totalRegions += sl.getNumberOfRegions(); totalRegions += sl.getRegionMetrics().size();
totalRequests += sl.getNumberOfRequests(); totalRequests += sl.getRequestCount();
lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000; lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
} }
long startcode = serverName.getStartcode(); long startcode = serverName.getStartcode();
int infoPort = master.getRegionServerInfoPort(serverName); int infoPort = master.getRegionServerInfoPort(serverName);
@ -201,18 +203,21 @@
</tr> </tr>
<% for (Address server: rsGroupServers) { <% for (Address server: rsGroupServers) {
ServerName serverName = serverMaping.get(server); ServerName serverName = serverMaping.get(server);
ServerLoad sl = onlineServers.get(server); ServerMetrics sl = onlineServers.get(server);
if (sl != null && serverName != null) { if (sl != null && serverName != null) {
double memStoreSizeMB = sl.getRegionMetrics().values()
.stream().mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
.sum();
int infoPort = master.getRegionServerInfoPort(serverName); int infoPort = master.getRegionServerInfoPort(serverName);
String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
%> %>
<tr> <tr>
<td><a href="<%= url %>"><%= serverName.getServerName() %></a></td> <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB() <td><%= TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
* TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td> * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB() <td><%= TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
* TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td> * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB() <td><%= TraditionalBinaryPrefix.long2String((long) memStoreSizeMB
* TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td> * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
</tr> </tr>
<% } else { %> <% } else { %>
@ -236,16 +241,22 @@
</tr> </tr>
<% for (Address server: rsGroupServers) { <% for (Address server: rsGroupServers) {
ServerName serverName = serverMaping.get(server); ServerName serverName = serverMaping.get(server);
ServerLoad sl = onlineServers.get(server); ServerMetrics sl = onlineServers.get(server);
if (sl != null && serverName != null) { if (sl != null && serverName != null) {
int infoPort = master.getRegionServerInfoPort(serverName); int infoPort = master.getRegionServerInfoPort(serverName);
long readRequestCount = 0;
long writeRequestCount = 0;
for (RegionMetrics rm : sl.getRegionMetrics().values()) {
readRequestCount += rm.getReadRequestCount();
writeRequestCount += rm.getWriteRequestCount();
}
String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
%> %>
<tr> <tr>
<td><a href="<%= url %>"><%= serverName.getServerName() %></a></td> <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
<td><%= String.format("%.0f", sl.getRequestsPerSecond()) %></td> <td><%= String.format("%.0f", sl.getRequestCountPerSecond()) %></td>
<td><%= sl.getReadRequestsCount() %></td> <td><%= readRequestCount %></td>
<td><%= sl.getWriteRequestsCount() %></td> <td><%= writeRequestCount %></td>
</tr> </tr>
<% } else { %> <% } else { %>
<tr> <tr>
@@ -271,22 +282,36 @@
</tr> </tr>
<% for (Address server: rsGroupServers) { <% for (Address server: rsGroupServers) {
ServerName serverName = serverMaping.get(server); ServerName serverName = serverMaping.get(server);
ServerLoad sl = onlineServers.get(server); ServerMetrics sl = onlineServers.get(server);
if (sl != null && serverName != null) { if (sl != null && serverName != null) {
long storeCount = 0;
long storeFileCount = 0;
double storeUncompressedSizeMB = 0;
double storeFileSizeMB = 0;
double totalStaticIndexSizeKB = 0;
double totalStaticBloomSizeKB = 0;
for (RegionMetrics rm : sl.getRegionMetrics().values()) {
storeCount += rm.getStoreCount();
storeFileCount += rm.getStoreFileCount();
storeUncompressedSizeMB += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
storeFileSizeMB += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
totalStaticIndexSizeKB += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
totalStaticBloomSizeKB += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
}
int infoPort = master.getRegionServerInfoPort(serverName); int infoPort = master.getRegionServerInfoPort(serverName);
String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
%> %>
<tr> <tr>
<td><a href="<%= url %>"><%= serverName.getServerName() %></a></td> <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
<td><%= sl.getStores() %></td> <td><%= storeCount %></td>
<td><%= sl.getStorefiles() %></td> <td><%= storeFileCount %></td>
<td><%= TraditionalBinaryPrefix.long2String( <td><%= TraditionalBinaryPrefix.long2String(
sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td> (long) storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB() <td><%= TraditionalBinaryPrefix.long2String((long) storeFileSizeMB
* TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td> * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB() <td><%= TraditionalBinaryPrefix.long2String((long) totalStaticIndexSizeKB
* TraditionalBinaryPrefix.KILO.value, "B", 1) %></td> * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
<td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB() <td><%= TraditionalBinaryPrefix.long2String((long) totalStaticBloomSizeKB
* TraditionalBinaryPrefix.KILO.value, "B", 1) %></td> * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
</tr> </tr>
<% } else { %> <% } else { %>
@@ -314,21 +339,27 @@
</tr> </tr>
<% for (Address server: rsGroupServers) { <% for (Address server: rsGroupServers) {
ServerName serverName = serverMaping.get(server); ServerName serverName = serverMaping.get(server);
ServerLoad sl = onlineServers.get(server); ServerMetrics sl = onlineServers.get(server);
if (sl != null && serverName != null) { if (sl != null && serverName != null) {
long totalCompactingCells = 0;
long currentCompactedCells = 0;
for (RegionMetrics rm : sl.getRegionMetrics().values()) {
totalCompactingCells += rm.getCompactingCellCount();
currentCompactedCells += rm.getCompactedCellCount();
}
String percentDone = ""; String percentDone = "";
if (sl.getTotalCompactingKVs() > 0) { if (totalCompactingCells > 0) {
percentDone = String.format("%.2f", 100 * percentDone = String.format("%.2f", 100 *
((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%"; ((float) currentCompactedCells / totalCompactingCells)) + "%";
} }
int infoPort = master.getRegionServerInfoPort(serverName); int infoPort = master.getRegionServerInfoPort(serverName);
String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status"; String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
%> %>
<tr> <tr>
<td><a href="<%= url %>"><%= serverName.getServerName() %></a></td> <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
<td><%= sl.getTotalCompactingKVs() %></td> <td><%= totalCompactingCells %></td>
<td><%= sl.getCurrentCompactedKVs() %></td> <td><%= currentCompactedCells %></td>
<td><%= sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td> <td><%= totalCompactingCells - currentCompactedCells %></td>
<td><%= percentDone %></td> <td><%= percentDone %></td>
</tr> </tr>
<% } else { %> <% } else { %>
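
The same aggregation idiom recurs in each rsgroup table above: totals that ServerLoad exposed directly (read/write request counts, store and storefile figures, compaction cell counts) are now summed over ServerMetrics#getRegionMetrics(). A condensed sketch for the compaction columns, assuming a ServerMetrics `sm`:

  import org.apache.hadoop.hbase.RegionMetrics;
  import org.apache.hadoop.hbase.ServerMetrics;

  // Illustrative consolidation of the per-region summing added in the hunk above.
  final class CompactionProgressSketch {
    static String percentDone(ServerMetrics sm) {
      long totalCompactingCells = 0;  // was ServerLoad#getTotalCompactingKVs()
      long currentCompactedCells = 0; // was ServerLoad#getCurrentCompactedKVs()
      for (RegionMetrics rm : sm.getRegionMetrics().values()) {
        totalCompactingCells += rm.getCompactingCellCount();
        currentCompactedCells += rm.getCompactedCellCount();
      }
      return totalCompactingCells > 0
          ? String.format("%.2f%%", 100 * ((float) currentCompactedCells / totalCompactingCells))
          : "";
    }
  }
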
@@ -34,8 +34,6 @@
import="org.apache.hadoop.hbase.HColumnDescriptor" import="org.apache.hadoop.hbase.HColumnDescriptor"
import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.HRegionLocation" import="org.apache.hadoop.hbase.HRegionLocation"
import="org.apache.hadoop.hbase.RegionLoad"
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.TableNotFoundException" import="org.apache.hadoop.hbase.TableNotFoundException"
@@ -60,16 +58,20 @@
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %>
<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
<%@ page import="org.apache.hadoop.hbase.Size" %>
<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %>
<%! <%!
/** /**
* @return An empty region load stamped with the passed in <code>regionInfo</code> * @return An empty region load stamped with the passed in <code>regionInfo</code>
* region name. * region name.
*/ */
private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) { private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) {
return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder(). return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder().
setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder(). setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME). setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build()); setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
} }
%> %>
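
The rewritten helper goes through RegionMetricsBuilder#toRegionMetrics instead of wrapping the protobuf in a RegionLoad. A hedged usage sketch of the placeholder pattern it supports (getOrDefault is an illustration; the page's actual control flow appears in a later hunk):

  import java.util.Map;
  import org.apache.hadoop.hbase.RegionMetrics;
  import org.apache.hadoop.hbase.client.RegionInfo;

  // Illustrative only: prefer reported metrics, else the empty placeholder, so
  // rendering code never null-checks individual columns. Assumes the map keys
  // compare region-name bytes by content, as ServerMetrics' map does.
  final class PlaceholderSketch {
    static RegionMetrics metricsOrEmpty(Map<byte[], RegionMetrics> reported,
        RegionInfo regionInfo, RegionMetrics empty) {
      return reported.getOrDefault(regionInfo.getRegionName(), empty);
    }
  }
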
<% <%
@@ -87,7 +89,6 @@
Table table; Table table;
String tableHeader; String tableHeader;
boolean withReplica = false; boolean withReplica = false;
ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false); boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM, int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
@@ -216,18 +217,18 @@ if ( fqtn != null ) {
float locality = 0.0f; float locality = 0.0f;
if (metaLocation != null) { if (metaLocation != null) {
ServerLoad sl = master.getServerManager().getLoad(metaLocation); ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
// The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely. // The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely.
hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation); hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation);
if (sl != null) { if (sl != null) {
Map<byte[], RegionLoad> map = sl.getRegionsLoad(); Map<byte[], RegionMetrics> map = sl.getRegionMetrics();
if (map.containsKey(meta.getRegionName())) { if (map.containsKey(meta.getRegionName())) {
RegionLoad load = map.get(meta.getRegionName()); RegionMetrics load = map.get(meta.getRegionName());
readReq = String.format("%,1d", load.getReadRequestsCount()); readReq = String.format("%,1d", load.getReadRequestCount());
writeReq = String.format("%,1d", load.getWriteRequestsCount()); writeReq = String.format("%,1d", load.getWriteRequestCount());
fileSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024); fileSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
fileCount = String.format("%,1d", load.getStorefiles()); fileCount = String.format("%,1d", load.getStoreFileCount());
memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024); memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
locality = load.getDataLocality(); locality = load.getDataLocality();
} }
} }
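
Per-region figures for hbase:meta now come from the RegionMetrics map keyed by region name bytes, with Size values converted where they are rendered. A sketch of that lookup; `sm` and `regionName` stand in for the page's locals:

  import java.util.Map;
  import org.apache.hadoop.hbase.RegionMetrics;
  import org.apache.hadoop.hbase.ServerMetrics;
  import org.apache.hadoop.hbase.Size;
  import org.apache.hadoop.util.StringUtils;

  // Illustrative only: fetch one region's metrics and format its storefile size.
  final class MetaRowSketch {
    static String fileSize(ServerMetrics sm, byte[] regionName) {
      Map<byte[], RegionMetrics> map = sm.getRegionMetrics();
      RegionMetrics load = map.get(regionName); // keys are compared by content
      return load == null ? "N/A"
          : StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
    }
  }
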
@@ -400,7 +401,7 @@ if ( fqtn != null ) {
Map<ServerName, Integer> regDistribution = new TreeMap<>(); Map<ServerName, Integer> regDistribution = new TreeMap<>();
Map<ServerName, Integer> primaryRegDistribution = new TreeMap<>(); Map<ServerName, Integer> primaryRegDistribution = new TreeMap<>();
List<HRegionLocation> regions = r.getAllRegionLocations(); List<HRegionLocation> regions = r.getAllRegionLocations();
Map<RegionInfo, RegionLoad> regionsToLoad = new LinkedHashMap<>(); Map<RegionInfo, RegionMetrics> regionsToLoad = new LinkedHashMap<>();
Map<RegionInfo, ServerName> regionsToServer = new LinkedHashMap<>(); Map<RegionInfo, ServerName> regionsToServer = new LinkedHashMap<>();
for (HRegionLocation hriEntry : regions) { for (HRegionLocation hriEntry : regions) {
RegionInfo regionInfo = hriEntry.getRegionInfo(); RegionInfo regionInfo = hriEntry.getRegionInfo();
@@ -408,28 +409,27 @@ if ( fqtn != null ) {
regionsToServer.put(regionInfo, addr); regionsToServer.put(regionInfo, addr);
if (addr != null) { if (addr != null) {
ServerLoad sl = master.getServerManager().getLoad(addr); ServerMetrics sl = master.getServerManager().getLoad(addr);
if (sl != null) { if (sl != null) {
Map<byte[], RegionLoad> map = sl.getRegionsLoad(); RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
RegionLoad regionload = map.get(regionInfo.getRegionName()); regionsToLoad.put(regionInfo, regionMetrics);
regionsToLoad.put(regionInfo, regionload); if(regionMetrics != null) {
if(regionload != null) { totalReadReq += regionMetrics.getReadRequestCount();
totalReadReq += regionload.getReadRequestsCount(); totalWriteReq += regionMetrics.getWriteRequestCount();
totalWriteReq += regionload.getWriteRequestsCount(); totalSize += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
totalSize += regionload.getStorefileSizeMB(); totalStoreFileCount += regionMetrics.getStoreFileCount();
totalStoreFileCount += regionload.getStorefiles(); totalMemSize += regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
totalMemSize += regionload.getMemStoreSizeMB(); totalStoreFileSizeMB += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
totalStoreFileSizeMB += regionload.getStorefileSizeMB();
} else { } else {
RegionLoad load0 = getEmptyRegionLoad(regionInfo); RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
regionsToLoad.put(regionInfo, load0); regionsToLoad.put(regionInfo, load0);
} }
} else{ } else{
RegionLoad load0 = getEmptyRegionLoad(regionInfo); RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
regionsToLoad.put(regionInfo, load0); regionsToLoad.put(regionInfo, load0);
} }
} else { } else {
RegionLoad load0 = getEmptyRegionLoad(regionInfo); RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
regionsToLoad.put(regionInfo, load0); regionsToLoad.put(regionInfo, load0);
} }
} }
@@ -474,156 +474,92 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
</tr> </tr>
<% <%
List<Map.Entry<RegionInfo, RegionLoad>> entryList = new ArrayList<>(regionsToLoad.entrySet()); List<Map.Entry<RegionInfo, RegionMetrics>> entryList = new ArrayList<>(regionsToLoad.entrySet());
if(sortKey != null) { if(sortKey != null) {
if (sortKey.equals("readrequest")) { if (sortKey.equals("readrequest")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue() == null) { }
return -1; int result = Long.compare(entry1.getValue().getReadRequestCount(),
} else if (entry2 == null || entry2.getValue() == null) { entry2.getValue().getReadRequestCount());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getReadRequestsCount() < entry2.getValue().getReadRequestsCount()) { return result;
result = -1; });
} else if (entry1.getValue().getReadRequestsCount() > entry2.getValue().getReadRequestsCount()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} else if (sortKey.equals("writerequest")) { } else if (sortKey.equals("writerequest")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue() == null) { }
return -1; int result = Long.compare(entry1.getValue().getWriteRequestCount(),
} else if (entry2 == null || entry2.getValue() == null) { entry2.getValue().getWriteRequestCount());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getWriteRequestsCount() < entry2.getValue() return result;
.getWriteRequestsCount()) { });
result = -1;
} else if (entry1.getValue().getWriteRequestsCount() > entry2.getValue()
.getWriteRequestsCount()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} else if (sortKey.equals("size")) { } else if (sortKey.equals("size")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue() == null) { }
return -1; int result = Double.compare(entry1.getValue().getStoreFileSize().get(),
} else if (entry2 == null || entry2.getValue() == null) { entry2.getValue().getStoreFileSize().get());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getStorefileSizeMB() < entry2.getValue() return result;
.getStorefileSizeMB()) { });
result = -1;
} else if (entry1.getValue().getStorefileSizeMB() > entry2
.getValue().getStorefileSizeMB()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} else if (sortKey.equals("filecount")) { } else if (sortKey.equals("filecount")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue() == null) { }
return -1; int result = Integer.compare(entry1.getValue().getStoreFileCount(),
} else if (entry2 == null || entry2.getValue() == null) { entry2.getValue().getStoreFileCount());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getStorefiles() < entry2.getValue() return result;
.getStorefiles()) { });
result = -1;
} else if (entry1.getValue().getStorefiles() > entry2.getValue()
.getStorefiles()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} else if (sortKey.equals("memstore")) { } else if (sortKey.equals("memstore")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue()==null) { }
return -1; int result = Double.compare(entry1.getValue().getMemStoreSize().get(),
} else if (entry2 == null || entry2.getValue()==null) { entry2.getValue().getMemStoreSize().get());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getMemStoreSizeMB() < entry2.getValue() return result;
.getMemStoreSizeMB()) { });
result = -1;
} else if (entry1.getValue().getMemStoreSizeMB() > entry2
.getValue().getMemStoreSizeMB()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} else if (sortKey.equals("locality")) { } else if (sortKey.equals("locality")) {
Collections.sort(entryList, Collections.sort(entryList, (entry1, entry2) -> {
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() { if (entry1 == null || entry1.getValue() == null) {
public int compare( return -1;
Map.Entry<RegionInfo, RegionLoad> entry1, } else if (entry2 == null || entry2.getValue() == null) {
Map.Entry<RegionInfo, RegionLoad> entry2) { return 1;
if (entry1 == null || entry1.getValue()==null) { }
return -1; int result = Double.compare(entry1.getValue().getDataLocality(),
} else if (entry2 == null || entry2.getValue()==null) { entry2.getValue().getDataLocality());
return 1; if (reverseOrder) {
} result = -1 * result;
int result = 0; }
if (entry1.getValue().getDataLocality() < entry2.getValue() return result;
.getDataLocality()) { });
result = -1;
} else if (entry1.getValue().getDataLocality() > entry2
.getValue().getDataLocality()) {
result = 1;
}
if (reverseOrder) {
result = -1 * result;
}
return result;
}
});
} }
} }
numRegions = regions.size(); numRegions = regions.size();
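
The sort rewrite above is behavior-preserving: each anonymous Comparator becomes a lambda, and the hand-rolled less-than/greater-than ladders collapse into Long.compare/Double.compare. Not from the patch, but the null handling could be pushed into Comparator combinators too; note that reversing such a comparator also moves null entries to the other end, a small departure from the hand-coded version:

  import java.util.Comparator;
  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hbase.RegionMetrics;
  import org.apache.hadoop.hbase.client.RegionInfo;

  // Illustrative alternative to the explicit null checks in each lambda above.
  final class SortSketch {
    static void sortByReadRequests(List<Map.Entry<RegionInfo, RegionMetrics>> entryList,
        boolean reverseOrder) {
      Comparator<Map.Entry<RegionInfo, RegionMetrics>> byReads = Comparator.comparing(
          Map.Entry::getValue,
          Comparator.nullsFirst(Comparator.comparingLong(RegionMetrics::getReadRequestCount)));
      entryList.sort(reverseOrder ? byReads.reversed() : byReads);
    }
  }
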
@@ -632,10 +568,10 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
if (numRegionsToRender < 0) { if (numRegionsToRender < 0) {
numRegionsToRender = numRegions; numRegionsToRender = numRegions;
} }
for (Map.Entry<RegionInfo, RegionLoad> hriEntry : entryList) { for (Map.Entry<RegionInfo, RegionMetrics> hriEntry : entryList) {
RegionInfo regionInfo = hriEntry.getKey(); RegionInfo regionInfo = hriEntry.getKey();
ServerName addr = regionsToServer.get(regionInfo); ServerName addr = regionsToServer.get(regionInfo);
RegionLoad load = hriEntry.getValue(); RegionMetrics load = hriEntry.getValue();
String readReq = "N/A"; String readReq = "N/A";
String writeReq = "N/A"; String writeReq = "N/A";
String regionSize = "N/A"; String regionSize = "N/A";
@@ -644,11 +580,11 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
float locality = 0.0f; float locality = 0.0f;
String state = "N/A"; String state = "N/A";
if(load != null) { if(load != null) {
readReq = String.format("%,1d", load.getReadRequestsCount()); readReq = String.format("%,1d", load.getReadRequestCount());
writeReq = String.format("%,1d", load.getWriteRequestsCount()); writeReq = String.format("%,1d", load.getWriteRequestCount());
regionSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024); regionSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
fileCount = String.format("%,1d", load.getStorefiles()); fileCount = String.format("%,1d", load.getStoreFileCount());
memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024); memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
locality = load.getDataLocality(); locality = load.getDataLocality();
} }
@@ -657,7 +593,7 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
} }
if (addr != null) { if (addr != null) {
ServerLoad sl = master.getServerManager().getLoad(addr); ServerMetrics sl = master.getServerManager().getLoad(addr);
// This port might be wrong if RS actually ended up using something else. // This port might be wrong if RS actually ended up using something else.
urlRegionServer = urlRegionServer =
"//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/"; "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";
@@ -29,8 +29,9 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -363,15 +364,16 @@ public class TestSimpleRegionNormalizer {
getRegionServerOfRegion(any())).thenReturn(sn); getRegionServerOfRegion(any())).thenReturn(sn);
for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) { for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
RegionLoad regionLoad = Mockito.mock(RegionLoad.class); RegionMetrics regionLoad = Mockito.mock(RegionMetrics.class);
when(regionLoad.getName()).thenReturn(region.getKey()); when(regionLoad.getRegionName()).thenReturn(region.getKey());
when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue()); when(regionLoad.getStoreFileSize())
.thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE));
// this is possibly broken with jdk9, unclear if false positive or not // this is possibly broken with jdk9, unclear if false positive or not
// suppress it for now, fix it when we get to running tests on 9 // suppress it for now, fix it when we get to running tests on 9
// see: http://errorprone.info/bugpattern/MockitoCast // see: http://errorprone.info/bugpattern/MockitoCast
when((Object) masterServices.getServerManager().getLoad(sn). when((Object) masterServices.getServerManager().getLoad(sn).
getRegionsLoad().get(region.getKey())).thenReturn(regionLoad); getRegionMetrics().get(region.getKey())).thenReturn(regionLoad);
} }
try { try {
when(masterRpcServices.isSplitOrMergeEnabled(any(), when(masterRpcServices.isSplitOrMergeEnabled(any(),