commit c459282fe0
parent 5317ca92bc

HBASE-20093 Replace ServerLoad by ServerMetrics for ServerManager

Signed-off-by: tedyu <yuzhihong@gmail.com>
@@ -22,8 +22,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.io.*;
-import java.util.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -34,8 +40,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -47,8 +53,10 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.ClassLoaderTestHelper;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -482,13 +490,13 @@ public class TestClassLoading {
  * @param tableName : given table.
  * @return subset of all servers.
  */
-Map<ServerName, ServerLoad> serversForTable(String tableName) {
-Map<ServerName, ServerLoad> serverLoadHashMap = new HashMap<>();
-for(Map.Entry<ServerName,ServerLoad> server:
+Map<ServerName, ServerMetrics> serversForTable(String tableName) {
+Map<ServerName, ServerMetrics> serverLoadHashMap = new HashMap<>();
+for(Map.Entry<ServerName, ServerMetrics> server:
 TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
 getOnlineServers().entrySet()) {
-for( Map.Entry<byte[], RegionLoad> region:
-server.getValue().getRegionsLoad().entrySet()) {
+for(Map.Entry<byte[], RegionMetrics> region:
+server.getValue().getRegionMetrics().entrySet()) {
 if (region.getValue().getNameAsString().equals(tableName)) {
 // this server hosts a region of tableName: add this server..
 serverLoadHashMap.put(server.getKey(),server.getValue());
@@ -501,8 +509,7 @@ public class TestClassLoading {
 }
 
 void assertAllRegionServers(String tableName) throws InterruptedException {
-Map<ServerName, ServerLoad> servers;
-String[] actualCoprocessors = null;
+Map<ServerName, ServerMetrics> servers;
 boolean success = false;
 String[] expectedCoprocessors = regionServerSystemCoprocessors;
 if (tableName == null) {
@@ -513,8 +520,9 @@ public class TestClassLoading {
 }
 for (int i = 0; i < 5; i++) {
 boolean any_failed = false;
-for(Map.Entry<ServerName,ServerLoad> server: servers.entrySet()) {
-actualCoprocessors = server.getValue().getRsCoprocessors();
+for(Map.Entry<ServerName, ServerMetrics> server: servers.entrySet()) {
+String[] actualCoprocessors =
+server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]);
 if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
 LOG.debug("failed comparison: actual: " +
 Arrays.toString(actualCoprocessors) +
@@ -124,8 +124,8 @@ public class TestRSGroupsOfflineMode {
 LOG.info("Waiting for region unassignments on failover RS...");
 TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
 @Override public boolean evaluate() throws Exception {
-return master.getServerManager().getLoad(failoverRS.getServerName())
-.getRegionsLoad().size() > 0;
+return !master.getServerManager().getLoad(failoverRS.getServerName())
+.getRegionMetrics().isEmpty();
 }
 });
 }
@@ -30,7 +30,9 @@ ServerManager serverManager;
 java.util.Set;
 java.util.stream.Collectors;
 org.apache.hadoop.hbase.master.HMaster;
-org.apache.hadoop.hbase.ServerLoad;
+org.apache.hadoop.hbase.RegionMetrics;
+org.apache.hadoop.hbase.ServerMetrics;
+org.apache.hadoop.hbase.Size;
 org.apache.hadoop.hbase.RSGroupTableAccessor;
 org.apache.hadoop.hbase.master.ServerManager;
 org.apache.hadoop.hbase.net.Address;
@@ -45,7 +47,7 @@ List<RSGroupInfo> groups = RSGroupTableAccessor.getAllRSGroupInfo(master.getConn
 
 <%java>
 RSGroupInfo [] rsGroupInfos = groups.toArray(new RSGroupInfo[groups.size()]);
-Map<Address, ServerLoad> collectServers = Collections.emptyMap();
+Map<Address, ServerMetrics> collectServers = Collections.emptyMap();
 if (master.getServerManager() != null) {
 collectServers =
 master.getServerManager().getOnlineServers().entrySet().stream()
@@ -85,7 +87,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_baseStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -112,13 +114,13 @@ if (master.getServerManager() != null) {
 int numRegionsOnline = 0;
 Set<Address> servers = rsGroupInfo.getServers();
 for (Address server : servers) {
-ServerLoad sl = collectServers.get(server);
+ServerMetrics sl = collectServers.get(server);
 if (sl != null) {
-requestsPerSecond += sl.getNumberOfRequests();
-numRegionsOnline += sl.getNumberOfRegions();
+requestsPerSecond += sl.getRequestCountPerSecond();
+numRegionsOnline += sl.getRegionMetrics().size();
 //rsgroup total
-totalRegions += sl.getNumberOfRegions();
-totalRequests += sl.getNumberOfRequests();
+totalRegions += sl.getRegionMetrics().size();
+totalRequests += sl.getRequestCountPerSecond();
 totalOnlineServers++;
 onlineServers++;
 } else {
@@ -157,7 +159,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_memoryStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -174,11 +176,12 @@ if (master.getServerManager() != null) {
 long maxHeap = 0;
 long memstoreSize = 0;
 for (Address server : rsGroupInfo.getServers()) {
-ServerLoad sl = collectServers.get(server);
+ServerMetrics sl = collectServers.get(server);
 if (sl != null) {
-usedHeap += sl.getUsedHeapMB();
-maxHeap += sl.getMaxHeapMB();
-memstoreSize += sl.getMemstoreSizeInMB();
+usedHeap += (long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE);
+maxHeap += (long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE);
+memstoreSize += (long) sl.getRegionMetrics().values().stream().mapToDouble(
+rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)).sum();
 }
 }
 </%java>
@@ -201,7 +204,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_requestStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -217,11 +220,13 @@ if (master.getServerManager() != null) {
 long readRequests = 0;
 long writeRequests = 0;
 for (Address server : rsGroupInfo.getServers()) {
-ServerLoad sl = collectServers.get(server);
+ServerMetrics sl = collectServers.get(server);
 if (sl != null) {
-requestsPerSecond += sl.getNumberOfRequests();
-readRequests += sl.getReadRequestsCount();
-writeRequests += sl.getWriteRequestsCount();
+for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+readRequests += rm.getReadRequestCount();
+writeRequests += rm.getWriteRequestCount();
+}
+requestsPerSecond += sl.getRequestCountPerSecond();
 }
 }
 </%java>
@@ -241,7 +246,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_storeStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -264,14 +269,16 @@ if (master.getServerManager() != null) {
 long bloomSize = 0;
 int count = 0;
 for (Address server : rsGroupInfo.getServers()) {
-ServerLoad sl = collectServers.get(server);
+ServerMetrics sl = collectServers.get(server);
 if (sl != null) {
-numStores += sl.getStores();
-numStorefiles += sl.getStorefiles();
-uncompressedStorefileSize += sl.getStoreUncompressedSizeMB();
-storefileSize += sl.getStorefileSizeInMB();
-indexSize += sl.getTotalStaticIndexSizeKB();
-bloomSize += sl.getTotalStaticBloomSizeKB();
+for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+numStores += rm.getStoreCount();
+numStorefiles += rm.getStoreFileCount();
+uncompressedStorefileSize += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+storefileSize += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
+indexSize += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+bloomSize += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+}
 count++;
 }
 }
@@ -298,7 +305,7 @@ if (master.getServerManager() != null) {
 <%def rsgroup_compactStats>
 <%args>
 RSGroupInfo [] rsGroupInfos;
-Map<Address, ServerLoad> collectServers;
+Map<Address, ServerMetrics> collectServers;
 </%args>
 <table class="table table-striped">
 <tr>
@@ -312,28 +319,30 @@ if (master.getServerManager() != null) {
 for (RSGroupInfo rsGroupInfo: rsGroupInfos) {
 String rsGroupName = rsGroupInfo.getName();
 int numStores = 0;
-long totalCompactingKVs = 0;
-long numCompactedKVs = 0;
+long totalCompactingCells = 0;
+long totalCompactedCells = 0;
 long remainingKVs = 0;
 long compactionProgress = 0;
 for (Address server : rsGroupInfo.getServers()) {
-ServerLoad sl = collectServers.get(server);
+ServerMetrics sl = collectServers.get(server);
 if (sl != null) {
-totalCompactingKVs += sl.getTotalCompactingKVs();
-numCompactedKVs += sl.getCurrentCompactedKVs();
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+totalCompactingCells += rl.getCompactingCellCount();
+totalCompactedCells += rl.getCompactedCellCount();
+}
 }
 }
-remainingKVs = totalCompactingKVs - numCompactedKVs;
+remainingKVs = totalCompactingCells - totalCompactedCells;
 String percentDone = "";
-if (totalCompactingKVs > 0) {
+if (totalCompactingCells > 0) {
 percentDone = String.format("%.2f", 100 *
-((float) numCompactedKVs / totalCompactingKVs)) + "%";
+((float) totalCompactedCells / totalCompactingCells)) + "%";
 }
 </%java>
 <tr>
 <td><& rsGroupLink; rsGroupName=rsGroupName; &></td>
-<td><% totalCompactingKVs %></td>
-<td><% numCompactedKVs %></td>
+<td><% totalCompactingCells %></td>
+<td><% totalCompactedCells %></td>
 <td><% remainingKVs %></td>
 <td><% percentDone %></td>
 </tr>
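
Note on the template hunks above: ServerMetrics carries none of ServerLoad's server-level scalar getters (getStores(), getMemstoreSizeInMB(), getTotalCompactingKVs(), ...), so every per-server figure is now an aggregate over getRegionMetrics(). A minimal sketch of that recurring pattern; the helper class is illustrative and not part of this commit:

    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    // Illustrative only: mirrors the aggregation the templates now do inline.
    final class ServerStatsSketch {
      // ServerLoad#getNumberOfRegions() becomes the size of the region map.
      static int regionCount(ServerMetrics sm) {
        return sm.getRegionMetrics().size();
      }

      // ServerLoad#getStorefileSizeInMB() becomes a sum of per-region sizes;
      // Size#get(unit) converts the value to the requested unit as a double.
      static long storeFileSizeMB(ServerMetrics sm) {
        long totalMB = 0;
        for (RegionMetrics rm : sm.getRegionMetrics().values()) {
          totalMB += (long) rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
        }
        return totalMB;
      }
    }
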
@@ -26,8 +26,10 @@ HMaster master;
 <%import>
 java.util.*;
 org.apache.hadoop.hbase.master.HMaster;
-org.apache.hadoop.hbase.ServerLoad;
+org.apache.hadoop.hbase.RegionMetrics;
+org.apache.hadoop.hbase.ServerMetrics;
 org.apache.hadoop.hbase.ServerName;
+org.apache.hadoop.hbase.Size;
 org.apache.hadoop.hbase.util.VersionInfo;
 org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 </%import>
@@ -84,12 +86,12 @@ Arrays.sort(serverNames);
 </tr>
 <%java>
 int totalRegions = 0;
-int totalRequests = 0;
+int totalRequestsPerSecond = 0;
 int inconsistentNodeNum = 0;
 String masterVersion = VersionInfo.getVersion();
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 String version = master.getRegionServerVersion(serverName);
 if (!masterVersion.equals(version)) {
 inconsistentNodeNum ++;
@@ -100,12 +102,11 @@ Arrays.sort(serverNames);
 long lastContact = 0;
 
 if (sl != null) {
-requestsPerSecond = sl.getRequestsPerSecond();
-numRegionsOnline = sl.getNumberOfRegions();
-totalRegions += sl.getNumberOfRegions();
-// Is this correct? Adding a rate to a measure.
-totalRequests += sl.getNumberOfRequests();
-lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000;
+requestsPerSecond = sl.getRequestCountPerSecond();
+numRegionsOnline = sl.getRegionMetrics().size();
+totalRegions += sl.getRegionMetrics().size();
+totalRequestsPerSecond += sl.getRequestCountPerSecond();
+lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
 }
 long startcode = serverName.getStartcode();
 </%java>
@@ -128,7 +129,7 @@ Arrays.sort(serverNames);
 <%else>
 <td></td>
 </%if>
-<td><% totalRequests %></td>
+<td><% totalRequestsPerSecond %></td>
 <td><% totalRegions %></td>
 </tr>
 </table>
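
The removed comment above ("Is this correct? Adding a rate to a measure.") points at what this hunk cleans up: the old column summed getNumberOfRequests(), a count accumulated over each server's reporting interval, while the new code sums the per-second rate, so the aggregate has consistent units. A hedged sketch of the new aggregation; the helper name is illustrative:

    import org.apache.hadoop.hbase.ServerMetrics;

    final class RequestRateSketch {
      // Summing each server's requests-per-second rate yields a cluster-wide
      // rate, whereas mixing rates with per-interval counts did not.
      static long clusterRequestsPerSecond(Iterable<ServerMetrics> servers) {
        long total = 0;
        for (ServerMetrics sm : servers) {
          total += sm.getRequestCountPerSecond();
        }
        return total;
      }
    }
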
@@ -149,16 +150,20 @@ Arrays.sort(serverNames);
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+long memStoreSizeMB = 0;
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+memStoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
+}
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+<td><% TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+<td><% TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getMemStoreSizeMB()
+<td><% TraditionalBinaryPrefix.long2String(memStoreSizeMB
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
 
 </tr>
@@ -189,15 +194,23 @@ for (ServerName serverName: serverNames) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+long readRequestCount = 0;
+long writeRequestCount = 0;
+long filteredReadRequestCount = 0;
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+readRequestCount += rl.getReadRequestCount();
+writeRequestCount += rl.getWriteRequestCount();
+filteredReadRequestCount += rl.getFilteredReadRequestCount();
+}
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% String.format("%.0f", sl.getRequestsPerSecond()) %></td>
-<td><% sl.getReadRequestsCount() %></td>
-<td><% sl.getFilteredReadRequestsCount() %></td>
-<td><% sl.getWriteRequestsCount() %></td>
+<td><% sl.getRequestCountPerSecond() %></td>
+<td><% readRequestCount %></td>
+<td><% filteredReadRequestCount %></td>
+<td><% writeRequestCount %></td>
 </tr>
 <%java>
 } else {
@@ -228,20 +241,34 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+long storeCount = 0;
+long storeFileCount = 0;
+long storeUncompressedSizeMB = 0;
+long storeFileSizeMB = 0;
+long totalStaticIndexSizeKB = 0;
+long totalStaticBloomSizeKB = 0;
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+storeCount += rl.getStoreCount();
+storeFileCount += rl.getStoreFileCount();
+storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+storeFileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
+totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+}
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% sl.getStores() %></td>
-<td><% sl.getStorefiles() %></td>
+<td><% storeCount %></td>
+<td><% storeFileCount %></td>
 <td><% TraditionalBinaryPrefix.long2String(
-sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
+storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
+<td><% TraditionalBinaryPrefix.long2String(storeFileSizeMB
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+<td><% TraditionalBinaryPrefix.long2String(totalStaticIndexSizeKB
 * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
-<td><% TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+<td><% TraditionalBinaryPrefix.long2String(totalStaticBloomSizeKB
 * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
 </tr>
 <%java>
@@ -270,19 +297,25 @@ if (sl != null) {
 <%java>
 for (ServerName serverName: serverNames) {
 
-ServerLoad sl = master.getServerManager().getLoad(serverName);
+ServerMetrics sl = master.getServerManager().getLoad(serverName);
 if (sl != null) {
+long totalCompactingCells = 0;
+long totalCompactedCells = 0;
+for (RegionMetrics rl : sl.getRegionMetrics().values()) {
+totalCompactingCells += rl.getCompactingCellCount();
+totalCompactedCells += rl.getCompactedCellCount();
+}
 String percentDone = "";
-if (sl.getTotalCompactingKVs() > 0) {
+if (totalCompactingCells > 0) {
 percentDone = String.format("%.2f", 100 *
-((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%";
+((float) totalCompactedCells / totalCompactingCells)) + "%";
 }
 </%java>
 <tr>
 <td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
-<td><% sl.getTotalCompactingKVs() %></td>
-<td><% sl.getCurrentCompactedKVs() %></td>
-<td><% sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td>
+<td><% totalCompactingCells %></td>
+<td><% totalCompactedCells %></td>
+<td><% totalCompactingCells - totalCompactedCells %></td>
 <td><% percentDone %></td>
 </tr>
 <%java>
@@ -300,7 +333,7 @@ if (sl.getTotalCompactingKVs() > 0) {
 <%def serverNameLink>
 <%args>
 ServerName serverName;
-ServerLoad serverLoad;
+ServerMetrics serverLoad;
 </%args>
 <%java>
 int infoPort = master.getRegionServerInfoPort(serverName);
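
A note on the (long) casts introduced throughout: ServerLoad returned whole megabytes (getUsedHeapMB(), getMaxHeapMB()), while ServerMetrics returns a Size whose get(Size.Unit) yields a double in the requested unit, so values are truncated back to long before formatting. Illustrative sketch; the helper is not part of the commit:

    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.Size;

    final class HeapSizeSketch {
      // Old: long mb = sl.getUsedHeapMB();
      // New: convert the Size explicitly and truncate, as the templates do.
      static long usedHeapMB(ServerMetrics sl) {
        return (long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE);
      }
    }
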
@@ -28,11 +28,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -132,12 +132,12 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
 }
 //the region is currently on none of the favored nodes
 //get it on one of them if possible
-ServerLoad l1 = super.services.getServerManager().getLoad(
+ServerMetrics l1 = super.services.getServerManager().getLoad(
 serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
-ServerLoad l2 = super.services.getServerManager().getLoad(
+ServerMetrics l2 = super.services.getServerManager().getLoad(
 serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
 if (l1 != null && l2 != null) {
-if (l1.getLoad() > l2.getLoad()) {
+if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) {
 destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
 } else {
 destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
@@ -296,9 +296,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored
 // assign the region to the one with a lower load
 // (both have the desired hdfs blocks)
 ServerName s;
-ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
-if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
 s = secondaryHost;
 } else {
 s = tertiaryHost;
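
For the balancer changes, the replacement should be behavior-preserving: ServerLoad.getLoad() reported the number of regions carried by the server, so comparing getRegionMetrics().size() keeps the same ordering. A sketch; the method name is illustrative:

    import org.apache.hadoop.hbase.ServerMetrics;

    final class LoadCompareSketch {
      // ServerLoad#getLoad() returned the region count, so the size of the
      // region-metrics map is a drop-in replacement for load comparisons.
      static boolean lessLoaded(ServerMetrics a, ServerMetrics b) {
        return a.getRegionMetrics().size() < b.getRegionMetrics().size();
      }
    }
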
@@ -24,13 +24,10 @@ import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
 import java.util.Map;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
@@ -39,6 +36,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.RSDumpServlet;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class MasterDumpServlet extends StateDumpServlet {
@@ -132,8 +130,8 @@ public class MasterDumpServlet extends StateDumpServlet {
 return;
 }
 
-Map<ServerName, ServerLoad> servers = sm.getOnlineServers();
-for (Map.Entry<ServerName, ServerLoad> e : servers.entrySet()) {
+Map<ServerName, ServerMetrics> servers = sm.getOnlineServers();
+for (Map.Entry<ServerName, ServerMetrics> e : servers.entrySet()) {
 out.println(e.getKey() + ": " + e.getValue());
 }
 }
@@ -37,7 +37,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
@@ -101,6 +102,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -447,16 +449,16 @@ public class MasterRpcServices extends RSRpcServices
 master.checkServiceStarted();
 ClusterStatusProtos.ServerLoad sl = request.getLoad();
 ServerName serverName = ProtobufUtil.toServerName(request.getServer());
-ServerLoad oldLoad = master.getServerManager().getLoad(serverName);
-ServerLoad newLoad = new ServerLoad(serverName, sl);
+ServerMetrics oldLoad = master.getServerManager().getLoad(serverName);
+ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, sl);
 master.getServerManager().regionServerReport(serverName, newLoad);
 int version = VersionInfoUtil.getCurrentClientVersionNumber();
 master.getAssignmentManager().reportOnlineRegions(serverName,
-version, newLoad.getRegionsLoad().keySet());
+version, newLoad.getRegionMetrics().keySet());
 if (sl != null && master.metricsMaster != null) {
 // Up our metrics.
 master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
-- (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
+- (oldLoad != null ? oldLoad.getRequestCount() : 0));
 }
 } catch (IOException ioe) {
 throw new ServiceException(ioe);
@@ -37,13 +37,12 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Predicate;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
@@ -62,8 +61,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
@@ -124,7 +125,8 @@ public class ServerManager {
 storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
 /** Map of registered servers to their current load */
-private final ConcurrentNavigableMap<ServerName, ServerLoad> onlineServers = new ConcurrentSkipListMap<>();
+private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
+new ConcurrentSkipListMap<>();
 
 /**
 * Map of admin interfaces per registered regionserver; these interfaces we use to control
@@ -240,7 +242,7 @@ public class ServerManager {
 request.getServerStartCode());
 checkClockSkew(sn, request.getServerCurrentTime());
 checkIsDead(sn, "STARTUP");
-if (!checkAndRecordNewServer(sn, new ServerLoad(ServerMetricsBuilder.of(sn)))) {
+if (!checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn))) {
 LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup"
 + " could not record the server: " + sn);
 }
@@ -252,12 +254,11 @@ public class ServerManager {
 * @param sn
 * @param hsl
 */
-private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
-Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
-for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
+private void updateLastFlushedSequenceIds(ServerName sn, ServerMetrics hsl) {
+for (Entry<byte[], RegionMetrics> entry : hsl.getRegionMetrics().entrySet()) {
 byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(entry.getKey()));
 Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName);
-long l = entry.getValue().getCompleteSequenceId();
+long l = entry.getValue().getCompletedSequenceId();
 // Don't let smaller sequence ids override greater sequence ids.
 if (LOG.isTraceEnabled()) {
 LOG.trace(Bytes.toString(encodedRegionName) + ", existingValue=" + existingValue +
@@ -273,10 +274,10 @@ public class ServerManager {
 ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
 computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName,
 () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
-for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) {
-byte[] family = storeSeqId.getFamilyName().toByteArray();
+for (Entry<byte[], Long> storeSeqId : entry.getValue().getStoreSequenceId().entrySet()) {
+byte[] family = storeSeqId.getKey();
 existingValue = storeFlushedSequenceId.get(family);
-l = storeSeqId.getSequenceId();
+l = storeSeqId.getValue();
 if (LOG.isTraceEnabled()) {
 LOG.trace(Bytes.toString(encodedRegionName) + ", family=" + Bytes.toString(family) +
 ", existingValue=" + existingValue + ", completeSequenceId=" + l);
@@ -291,7 +292,7 @@ public class ServerManager {
 
 @VisibleForTesting
 public void regionServerReport(ServerName sn,
-ServerLoad sl) throws YouAreDeadException {
+ServerMetrics sl) throws YouAreDeadException {
 checkIsDead(sn, "REPORT");
 if (null == this.onlineServers.replace(sn, sl)) {
 // Already have this host+port combo and its just different start code?
@@ -316,7 +317,7 @@ public class ServerManager {
 * @param sl the server load on the server
 * @return true if the server is recorded, otherwise, false
 */
-boolean checkAndRecordNewServer(final ServerName serverName, final ServerLoad sl) {
+boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) {
 ServerName existingServer = null;
 synchronized (this.onlineServers) {
 existingServer = findServerWithSameHostnamePortWithLock(serverName);
@@ -423,7 +424,7 @@ public class ServerManager {
 * @param serverName The remote servers name.
 */
 @VisibleForTesting
-void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
+void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) {
 LOG.info("Registering regionserver=" + serverName);
 this.onlineServers.put(serverName, sl);
 this.rsAdmins.remove(serverName);
@@ -447,9 +448,9 @@ public class ServerManager {
 
 /**
 * @param serverName
- * @return ServerLoad if serverName is known else null
+ * @return ServerMetrics if serverName is known else null
 */
-public ServerLoad getLoad(final ServerName serverName) {
+public ServerMetrics getLoad(final ServerName serverName) {
 return this.onlineServers.get(serverName);
 }
 
@@ -462,9 +463,9 @@ public class ServerManager {
 public double getAverageLoad() {
 int totalLoad = 0;
 int numServers = 0;
-for (ServerLoad sl: this.onlineServers.values()) {
-numServers++;
-totalLoad += sl.getNumberOfRegions();
+for (ServerMetrics sl : this.onlineServers.values()) {
+numServers++;
+totalLoad += sl.getRegionMetrics().size();
 }
 return numServers == 0 ? 0 :
 (double)totalLoad / (double)numServers;
@@ -479,7 +480,7 @@ public class ServerManager {
 /**
 * @return Read-only map of servers to serverinfo
 */
-public Map<ServerName, ServerLoad> getOnlineServers() {
+public Map<ServerName, ServerMetrics> getOnlineServers() {
 // Presumption is that iterating the returned Map is OK.
 synchronized (this.onlineServers) {
 return Collections.unmodifiableMap(this.onlineServers);
@@ -907,11 +908,11 @@ public class ServerManager {
 * @return A copy of the internal list of online servers matched by the predicator
 */
 public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> keys,
-Predicate<ServerLoad> idleServerPredicator) {
+Predicate<ServerMetrics> idleServerPredicator) {
 List<ServerName> names = new ArrayList<>();
 if (keys != null && idleServerPredicator != null) {
 keys.forEach(name -> {
-ServerLoad load = onlineServers.get(name);
+ServerMetrics load = onlineServers.get(name);
 if (load != null) {
 if (idleServerPredicator.test(load)) {
 names.add(name);
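
Beyond the type rename, the one real shape change in ServerManager is the per-store sequence-id accessor: RegionLoad exposed a protobuf List of StoreSequenceId, while RegionMetrics.getStoreSequenceId() returns a plain Map<byte[], Long> keyed by column family, so callers iterate map entries directly. A minimal sketch of consuming the new shape; the helper is illustrative:

    import java.util.Map;
    import org.apache.hadoop.hbase.RegionMetrics;

    final class StoreSeqIdSketch {
      // getStoreSequenceId(): family name -> last flushed sequence id,
      // replacing the protobuf StoreSequenceId list and its getters.
      static long maxStoreSequenceId(RegionMetrics rm) {
        long max = -1;
        for (Map.Entry<byte[], Long> e : rm.getStoreSequenceId().entrySet()) {
          max = Math.max(max, e.getValue());
        }
        return max;
      }
    }
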
@@ -1,4 +1,5 @@
-/*
+/**
+ *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
@@ -33,7 +34,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -73,8 +73,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
 
 private static final List<RegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
 
-static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
-= load -> load.getNumberOfRegions() == 0;
+static final Predicate<ServerMetrics> IDLE_SERVER_PREDICATOR
+= load -> load.getRegionMetrics().isEmpty();
 
 protected RegionLocationFinder regionFinder;
 protected boolean useRegionFinder;
@@ -31,9 +31,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.util.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
@@ -274,10 +274,10 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
 
 // Assign the region to the one with a lower load (both have the desired hdfs blocks)
 ServerName s;
-ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
-ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
+ServerMetrics tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost);
+ServerMetrics secondaryLoad = super.services.getServerManager().getLoad(secondaryHost);
 if (secondaryLoad != null && tertiaryLoad != null) {
-if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) {
+if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
 s = secondaryHost;
 } else {
 s = tertiaryHost;
@@ -22,10 +22,10 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
-
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 
 /**
@@ -44,7 +45,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 *
 * <ol>
 * <li> Get all regions of a given table
-* <li> Get avg size S of each region (by total size of store files reported in RegionLoad)
+* <li> Get avg size S of each region (by total size of store files reported in RegionMetrics)
 * <li> Seek every single region one by one. If a region R0 is bigger than S * 2, it is
 * kindly requested to split. Thereon evaluate the next region R1
 * <li> Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.
@@ -204,12 +205,12 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
 private long getRegionSize(RegionInfo hri) {
 ServerName sn = masterServices.getAssignmentManager().getRegionStates().
 getRegionServerOfRegion(hri);
-RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn).
-getRegionsLoad().get(hri.getRegionName());
+RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).
+getRegionMetrics().get(hri.getRegionName());
 if (regionLoad == null) {
 LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");
 return -1;
 }
-return regionLoad.getStorefileSizeMB();
+return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
 }
 }
@@ -28,7 +28,6 @@
 import="java.util.stream.Collectors"
 import="org.apache.hadoop.hbase.HTableDescriptor"
 import="org.apache.hadoop.hbase.RSGroupTableAccessor"
-import="org.apache.hadoop.hbase.ServerLoad"
 import="org.apache.hadoop.hbase.ServerName"
 import="org.apache.hadoop.hbase.TableName"
 import="org.apache.hadoop.hbase.client.Admin"
@@ -42,6 +41,9 @@
 import="org.apache.hadoop.hbase.util.Bytes"
 import="org.apache.hadoop.hbase.util.VersionInfo"
 import="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"%>
+<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
+<%@ page import="org.apache.hadoop.hbase.Size" %>
+<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
 <%
 HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
 String rsGroupName = request.getParameter("name");
@@ -67,7 +69,7 @@
 return 0;
 });
 
-Map<Address, ServerLoad> onlineServers = Collections.emptyMap();
+Map<Address, ServerMetrics> onlineServers = Collections.emptyMap();
 Map<Address, ServerName> serverMaping = Collections.emptyMap();
 if (master.getServerManager() != null) {
 onlineServers = master.getServerManager().getOnlineServers().entrySet().stream()
@@ -141,7 +143,7 @@
 for (Address server: rsGroupServers) {
 ServerName serverName = serverMaping.get(server);
 if (serverName != null) {
-ServerLoad sl = onlineServers.get(server);
+ServerMetrics sl = onlineServers.get(server);
 String version = master.getRegionServerVersion(serverName);
 if (!masterVersion.equals(version)) {
 inconsistentNodeNum ++;
@@ -150,11 +152,11 @@
 int numRegionsOnline = 0;
 long lastContact = 0;
 if (sl != null) {
-requestsPerSecond = sl.getRequestsPerSecond();
-numRegionsOnline = sl.getNumberOfRegions();
-totalRegions += sl.getNumberOfRegions();
-totalRequests += sl.getNumberOfRequests();
-lastContact = (System.currentTimeMillis() - sl.getReportTime())/1000;
+requestsPerSecond = sl.getRequestCountPerSecond();
+numRegionsOnline = sl.getRegionMetrics().size();
+totalRegions += sl.getRegionMetrics().size();
+totalRequests += sl.getRequestCount();
+lastContact = (System.currentTimeMillis() - sl.getReportTimestamp())/1000;
 }
 long startcode = serverName.getStartcode();
 int infoPort = master.getRegionServerInfoPort(serverName);
@@ -201,18 +203,21 @@
 </tr>
 <% for (Address server: rsGroupServers) {
 ServerName serverName = serverMaping.get(server);
-ServerLoad sl = onlineServers.get(server);
+ServerMetrics sl = onlineServers.get(server);
 if (sl != null && serverName != null) {
+double memStoreSizeMB = sl.getRegionMetrics().values()
+.stream().mapToDouble(rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE))
+.sum();
 int infoPort = master.getRegionServerInfoPort(serverName);
 String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
 %>
 <tr>
 <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+<td><%= TraditionalBinaryPrefix.long2String((long) sl.getUsedHeapSize().get(Size.Unit.MEGABYTE)
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+<td><%= TraditionalBinaryPrefix.long2String((long) sl.getMaxHeapSize().get(Size.Unit.MEGABYTE)
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+<td><%= TraditionalBinaryPrefix.long2String((long) memStoreSizeMB
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
 </tr>
 <% } else { %>
@@ -236,16 +241,22 @@
 </tr>
 <% for (Address server: rsGroupServers) {
 ServerName serverName = serverMaping.get(server);
-ServerLoad sl = onlineServers.get(server);
+ServerMetrics sl = onlineServers.get(server);
 if (sl != null && serverName != null) {
 int infoPort = master.getRegionServerInfoPort(serverName);
+long readRequestCount = 0;
+long writeRequestCount = 0;
+for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+readRequestCount += rm.getReadRequestCount();
+writeRequestCount += rm.getWriteRequestCount();
+}
 String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
 %>
 <tr>
 <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-<td><%= String.format("%.0f", sl.getRequestsPerSecond()) %></td>
-<td><%= sl.getReadRequestsCount() %></td>
-<td><%= sl.getWriteRequestsCount() %></td>
+<td><%= String.format("%.0f", sl.getRequestCountPerSecond()) %></td>
+<td><%= readRequestCount %></td>
+<td><%= writeRequestCount %></td>
 </tr>
 <% } else { %>
 <tr>
@@ -271,22 +282,36 @@
 </tr>
 <% for (Address server: rsGroupServers) {
 ServerName serverName = serverMaping.get(server);
-ServerLoad sl = onlineServers.get(server);
+ServerMetrics sl = onlineServers.get(server);
 if (sl != null && serverName != null) {
+long storeCount = 0;
+long storeFileCount = 0;
+double storeUncompressedSizeMB = 0;
+double storeFileSizeMB = 0;
+double totalStaticIndexSizeKB = 0;
+double totalStaticBloomSizeKB = 0;
+for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+storeCount += rm.getStoreCount();
+storeFileCount += rm.getStoreFileCount();
+storeUncompressedSizeMB += rm.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+storeFileSizeMB += rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
+totalStaticIndexSizeKB += rm.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+totalStaticBloomSizeKB += rm.getBloomFilterSize().get(Size.Unit.KILOBYTE);
+}
 int infoPort = master.getRegionServerInfoPort(serverName);
 String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
 %>
 <tr>
 <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-<td><%= sl.getStores() %></td>
-<td><%= sl.getStorefiles() %></td>
+<td><%= storeCount %></td>
+<td><%= storeFileCount %></td>
 <td><%= TraditionalBinaryPrefix.long2String(
-sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+(long) storeUncompressedSizeMB * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
+<td><%= TraditionalBinaryPrefix.long2String((long) storeFileSizeMB
 * TraditionalBinaryPrefix.MEGA.value, "B", 1) %></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+<td><%= TraditionalBinaryPrefix.long2String((long) totalStaticIndexSizeKB
 * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
-<td><%= TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+<td><%= TraditionalBinaryPrefix.long2String((long) totalStaticBloomSizeKB
 * TraditionalBinaryPrefix.KILO.value, "B", 1) %></td>
 </tr>
 <% } else { %>
@@ -314,21 +339,27 @@
 </tr>
 <% for (Address server: rsGroupServers) {
 ServerName serverName = serverMaping.get(server);
-ServerLoad sl = onlineServers.get(server);
+ServerMetrics sl = onlineServers.get(server);
 if (sl != null && serverName != null) {
+long totalCompactingCells = 0;
+long currentCompactedCells = 0;
+for (RegionMetrics rm : sl.getRegionMetrics().values()) {
+totalCompactingCells += rm.getCompactingCellCount();
+currentCompactedCells += rm.getCompactedCellCount();
+}
 String percentDone = "";
-if (sl.getTotalCompactingKVs() > 0) {
+if (totalCompactingCells > 0) {
 percentDone = String.format("%.2f", 100 *
-((float) sl.getCurrentCompactedKVs() / sl.getTotalCompactingKVs())) + "%";
+((float) currentCompactedCells / totalCompactingCells)) + "%";
 }
 int infoPort = master.getRegionServerInfoPort(serverName);
 String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
 %>
 <tr>
 <td><a href="<%= url %>"><%= serverName.getServerName() %></a></td>
-<td><%= sl.getTotalCompactingKVs() %></td>
-<td><%= sl.getCurrentCompactedKVs() %></td>
-<td><%= sl.getTotalCompactingKVs() - sl.getCurrentCompactedKVs() %></td>
+<td><%= totalCompactingCells %></td>
+<td><%= currentCompactedCells %></td>
+<td><%= totalCompactingCells - currentCompactedCells %></td>
 <td><%= percentDone %></td>
 </tr>
 <% } else { %>
@ -33,8 +33,6 @@
|
|||
import="org.apache.hadoop.hbase.HColumnDescriptor"
|
||||
import="org.apache.hadoop.hbase.HConstants"
|
||||
import="org.apache.hadoop.hbase.HRegionLocation"
|
||||
import="org.apache.hadoop.hbase.RegionLoad"
|
||||
import="org.apache.hadoop.hbase.ServerLoad"
|
||||
import="org.apache.hadoop.hbase.ServerName"
|
||||
import="org.apache.hadoop.hbase.TableName"
|
||||
import="org.apache.hadoop.hbase.TableNotFoundException"
|
||||
|
@ -57,16 +55,20 @@
|
|||
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.Size" %>
|
||||
<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %>
|
||||
<%!
|
||||
/**
|
||||
* @return An empty region load stamped with the passed in <code>regionInfo</code>
|
||||
* region name.
|
||||
*/
|
||||
private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) {
|
||||
return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder().
|
||||
setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
|
||||
setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
|
||||
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
|
||||
private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) {
|
||||
return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder().
|
||||
setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
|
||||
setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
|
||||
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
|
||||
}
|
||||
%>
|
||||
<%
|
||||
|
@ -84,7 +86,6 @@
|
|||
Table table;
|
||||
String tableHeader;
|
||||
boolean withReplica = false;
|
||||
ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
|
||||
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
|
||||
boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
|
||||
int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
|
||||
|
@ -213,18 +214,18 @@ if ( fqtn != null ) {
|
|||
float locality = 0.0f;
|
||||
|
||||
if (metaLocation != null) {
|
||||
ServerLoad sl = master.getServerManager().getLoad(metaLocation);
|
||||
ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
|
||||
// The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely.
|
||||
hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation);
|
||||
if (sl != null) {
|
||||
Map<byte[], RegionLoad> map = sl.getRegionsLoad();
|
||||
Map<byte[], RegionMetrics> map = sl.getRegionMetrics();
|
||||
if (map.containsKey(meta.getRegionName())) {
|
||||
RegionLoad load = map.get(meta.getRegionName());
|
||||
readReq = String.format("%,1d", load.getReadRequestsCount());
|
||||
writeReq = String.format("%,1d", load.getWriteRequestsCount());
|
||||
fileSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
|
||||
fileCount = String.format("%,1d", load.getStorefiles());
|
||||
memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
|
||||
RegionMetrics load = map.get(meta.getRegionName());
|
||||
readReq = String.format("%,1d", load.getReadRequestCount());
|
||||
writeReq = String.format("%,1d", load.getWriteRequestCount());
|
||||
fileSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
|
||||
fileCount = String.format("%,1d", load.getStoreFileCount());
|
||||
memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
|
||||
locality = load.getDataLocality();
|
||||
}
|
||||
}
|
||||
|
@ -389,7 +390,7 @@ if ( fqtn != null ) {
|
|||
Map<ServerName, Integer> regDistribution = new TreeMap<>();
|
||||
Map<ServerName, Integer> primaryRegDistribution = new TreeMap<>();
|
||||
List<HRegionLocation> regions = r.getAllRegionLocations();
|
||||
Map<RegionInfo, RegionLoad> regionsToLoad = new LinkedHashMap<>();
|
||||
Map<RegionInfo, RegionMetrics> regionsToLoad = new LinkedHashMap<>();
|
||||
Map<RegionInfo, ServerName> regionsToServer = new LinkedHashMap<>();
|
||||
for (HRegionLocation hriEntry : regions) {
|
||||
RegionInfo regionInfo = hriEntry.getRegionInfo();
|
||||
|
@@ -397,28 +398,27 @@ if ( fqtn != null ) {
     regionsToServer.put(regionInfo, addr);
 
     if (addr != null) {
-      ServerLoad sl = master.getServerManager().getLoad(addr);
+      ServerMetrics sl = master.getServerManager().getLoad(addr);
       if (sl != null) {
-        Map<byte[], RegionLoad> map = sl.getRegionsLoad();
-        RegionLoad regionload = map.get(regionInfo.getRegionName());
-        regionsToLoad.put(regionInfo, regionload);
-        if(regionload != null) {
-          totalReadReq += regionload.getReadRequestsCount();
-          totalWriteReq += regionload.getWriteRequestsCount();
-          totalSize += regionload.getStorefileSizeMB();
-          totalStoreFileCount += regionload.getStorefiles();
-          totalMemSize += regionload.getMemStoreSizeMB();
-          totalStoreFileSizeMB += regionload.getStorefileSizeMB();
+        RegionMetrics regionMetrics = sl.getRegionMetrics().get(regionInfo.getRegionName());
+        regionsToLoad.put(regionInfo, regionMetrics);
+        if(regionMetrics != null) {
+          totalReadReq += regionMetrics.getReadRequestCount();
+          totalWriteReq += regionMetrics.getWriteRequestCount();
+          totalSize += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
+          totalStoreFileCount += regionMetrics.getStoreFileCount();
+          totalMemSize += regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
+          totalStoreFileSizeMB += regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
         } else {
-          RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+          RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
           regionsToLoad.put(regionInfo, load0);
         }
       } else{
-        RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+        RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
         regionsToLoad.put(regionInfo, load0);
       }
     } else {
-      RegionLoad load0 = getEmptyRegionLoad(regionInfo);
+      RegionMetrics load0 = getEmptyRegionMetrics(regionInfo);
       regionsToLoad.put(regionInfo, load0);
     }
   }
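The totals in this hunk switch from summing raw MB integers to reading each metric through Size#get(Size.Unit). A self-contained sketch of that accumulation pattern (the sample sizes are invented; note that the compound += quietly narrows the returned double, exactly as the JSP's totalSize += line does):

import org.apache.hadoop.hbase.Size;

public class TotalsSketch {
  public static void main(String[] args) {
    // Illustrative store file sizes, standing in for RegionMetrics#getStoreFileSize().
    Size[] storeFileSizes = {
        new Size(512, Size.Unit.MEGABYTE),
        new Size(2, Size.Unit.GIGABYTE),
    };
    long totalSize = 0;
    for (Size s : storeFileSizes) {
      // Size#get(Unit) converts on read, so mixed units accumulate correctly;
      // the old int getStorefileSizeMB() could not represent sub-MB values.
      totalSize += s.get(Size.Unit.MEGABYTE);
    }
    System.out.println(totalSize + " MB"); // 2560 MB
  }
}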
@@ -462,156 +462,92 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
 </tr>
 
 <%
-List<Map.Entry<RegionInfo, RegionLoad>> entryList = new ArrayList<>(regionsToLoad.entrySet());
+List<Map.Entry<RegionInfo, RegionMetrics>> entryList = new ArrayList<>(regionsToLoad.entrySet());
 if(sortKey != null) {
   if (sortKey.equals("readrequest")) {
-    Collections.sort(entryList,
-        new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
-          public int compare(
-              Map.Entry<RegionInfo, RegionLoad> entry1,
-              Map.Entry<RegionInfo, RegionLoad> entry2) {
-            if (entry1 == null || entry1.getValue() == null) {
-              return -1;
-            } else if (entry2 == null || entry2.getValue() == null) {
-              return 1;
-            }
-            int result = 0;
-            if (entry1.getValue().getReadRequestsCount() < entry2.getValue().getReadRequestsCount()) {
-              result = -1;
-            } else if (entry1.getValue().getReadRequestsCount() > entry2.getValue().getReadRequestsCount()) {
-              result = 1;
-            }
-            if (reverseOrder) {
-              result = -1 * result;
-            }
-            return result;
-          }
-        });
+    Collections.sort(entryList, (entry1, entry2) -> {
+      if (entry1 == null || entry1.getValue() == null) {
+        return -1;
+      } else if (entry2 == null || entry2.getValue() == null) {
+        return 1;
+      }
+      int result = Long.compare(entry1.getValue().getReadRequestCount(),
+          entry2.getValue().getReadRequestCount());
+      if (reverseOrder) {
+        result = -1 * result;
+      }
+      return result;
+    });
} else if (sortKey.equals("writerequest")) {
|
||||
Collections.sort(entryList,
|
||||
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
|
||||
public int compare(
|
||||
Map.Entry<RegionInfo, RegionLoad> entry1,
|
||||
Map.Entry<RegionInfo, RegionLoad> entry2) {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = 0;
|
||||
if (entry1.getValue().getWriteRequestsCount() < entry2.getValue()
|
||||
.getWriteRequestsCount()) {
|
||||
result = -1;
|
||||
} else if (entry1.getValue().getWriteRequestsCount() > entry2.getValue()
|
||||
.getWriteRequestsCount()) {
|
||||
result = 1;
|
||||
}
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
Collections.sort(entryList, (entry1, entry2) -> {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = Long.compare(entry1.getValue().getWriteRequestCount(),
|
||||
entry2.getValue().getWriteRequestCount());
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
} else if (sortKey.equals("size")) {
|
||||
Collections.sort(entryList,
|
||||
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
|
||||
public int compare(
|
||||
Map.Entry<RegionInfo, RegionLoad> entry1,
|
||||
Map.Entry<RegionInfo, RegionLoad> entry2) {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = 0;
|
||||
if (entry1.getValue().getStorefileSizeMB() < entry2.getValue()
|
||||
.getStorefileSizeMB()) {
|
||||
result = -1;
|
||||
} else if (entry1.getValue().getStorefileSizeMB() > entry2
|
||||
.getValue().getStorefileSizeMB()) {
|
||||
result = 1;
|
||||
}
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
Collections.sort(entryList, (entry1, entry2) -> {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = Double.compare(entry1.getValue().getStoreFileSize().get(),
|
||||
entry2.getValue().getStoreFileSize().get());
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
} else if (sortKey.equals("filecount")) {
|
||||
Collections.sort(entryList,
|
||||
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
|
||||
public int compare(
|
||||
Map.Entry<RegionInfo, RegionLoad> entry1,
|
||||
Map.Entry<RegionInfo, RegionLoad> entry2) {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = 0;
|
||||
if (entry1.getValue().getStorefiles() < entry2.getValue()
|
||||
.getStorefiles()) {
|
||||
result = -1;
|
||||
} else if (entry1.getValue().getStorefiles() > entry2.getValue()
|
||||
.getStorefiles()) {
|
||||
result = 1;
|
||||
}
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
Collections.sort(entryList, (entry1, entry2) -> {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = Integer.compare(entry1.getValue().getStoreCount(),
|
||||
entry2.getValue().getStoreCount());
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
} else if (sortKey.equals("memstore")) {
|
||||
Collections.sort(entryList,
|
||||
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
|
||||
public int compare(
|
||||
Map.Entry<RegionInfo, RegionLoad> entry1,
|
||||
Map.Entry<RegionInfo, RegionLoad> entry2) {
|
||||
if (entry1 == null || entry1.getValue()==null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue()==null) {
|
||||
return 1;
|
||||
}
|
||||
int result = 0;
|
||||
if (entry1.getValue().getMemStoreSizeMB() < entry2.getValue()
|
||||
.getMemStoreSizeMB()) {
|
||||
result = -1;
|
||||
} else if (entry1.getValue().getMemStoreSizeMB() > entry2
|
||||
.getValue().getMemStoreSizeMB()) {
|
||||
result = 1;
|
||||
}
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
Collections.sort(entryList, (entry1, entry2) -> {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = Double.compare(entry1.getValue().getMemStoreSize().get(),
|
||||
entry2.getValue().getMemStoreSize().get());
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
} else if (sortKey.equals("locality")) {
|
||||
Collections.sort(entryList,
|
||||
new Comparator<Map.Entry<RegionInfo, RegionLoad>>() {
|
||||
public int compare(
|
||||
Map.Entry<RegionInfo, RegionLoad> entry1,
|
||||
Map.Entry<RegionInfo, RegionLoad> entry2) {
|
||||
if (entry1 == null || entry1.getValue()==null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue()==null) {
|
||||
return 1;
|
||||
}
|
||||
int result = 0;
|
||||
if (entry1.getValue().getDataLocality() < entry2.getValue()
|
||||
.getDataLocality()) {
|
||||
result = -1;
|
||||
} else if (entry1.getValue().getDataLocality() > entry2
|
||||
.getValue().getDataLocality()) {
|
||||
result = 1;
|
||||
}
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
});
|
||||
Collections.sort(entryList, (entry1, entry2) -> {
|
||||
if (entry1 == null || entry1.getValue() == null) {
|
||||
return -1;
|
||||
} else if (entry2 == null || entry2.getValue() == null) {
|
||||
return 1;
|
||||
}
|
||||
int result = Double.compare(entry1.getValue().getDataLocality(),
|
||||
entry2.getValue().getDataLocality());
|
||||
if (reverseOrder) {
|
||||
result = -1 * result;
|
||||
}
|
||||
return result;
|
||||
});
|
||||
   }
 }
 numRegions = regions.size();
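Every sort branch above replaces an anonymous Comparator full of hand-rolled </> checks with a lambda delegating to Long.compare/Double.compare. A condensed, runnable sketch of the same refactor (the Row class and sample counts are invented for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ComparatorSketch {
  static class Row {
    final long readRequestCount;
    Row(long readRequestCount) { this.readRequestCount = readRequestCount; }
  }

  public static void main(String[] args) {
    List<Row> rows = new ArrayList<>(Arrays.asList(new Row(30), new Row(10), new Row(20)));
    boolean reverseOrder = true;

    // The lambda replaces ~20 lines of anonymous Comparator boilerplate;
    // Long.compare also sidesteps the overflow risk of subtraction-based compares.
    Collections.sort(rows, (r1, r2) -> {
      if (r1 == null) {
        return -1;
      } else if (r2 == null) {
        return 1;
      }
      int result = Long.compare(r1.readRequestCount, r2.readRequestCount);
      return reverseOrder ? -result : result;
    });

    rows.forEach(r -> System.out.println(r.readRequestCount)); // 30, 20, 10
  }
}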
@@ -620,10 +556,10 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
 if (numRegionsToRender < 0) {
   numRegionsToRender = numRegions;
 }
-for (Map.Entry<RegionInfo, RegionLoad> hriEntry : entryList) {
+for (Map.Entry<RegionInfo, RegionMetrics> hriEntry : entryList) {
   RegionInfo regionInfo = hriEntry.getKey();
   ServerName addr = regionsToServer.get(regionInfo);
-  RegionLoad load = hriEntry.getValue();
+  RegionMetrics load = hriEntry.getValue();
   String readReq = "N/A";
   String writeReq = "N/A";
   String regionSize = "N/A";
@@ -631,16 +567,16 @@ ShowDetailName&Start/End Key<input type="checkbox" id="showWhole" style="margin-
   String memSize = "N/A";
   float locality = 0.0f;
   if(load != null) {
-    readReq = String.format("%,1d", load.getReadRequestsCount());
-    writeReq = String.format("%,1d", load.getWriteRequestsCount());
-    regionSize = StringUtils.byteDesc(load.getStorefileSizeMB()*1024l*1024);
-    fileCount = String.format("%,1d", load.getStorefiles());
-    memSize = StringUtils.byteDesc(load.getMemStoreSizeMB()*1024l*1024);
+    readReq = String.format("%,1d", load.getReadRequestCount());
+    writeReq = String.format("%,1d", load.getWriteRequestCount());
+    regionSize = StringUtils.byteDesc((long) load.getStoreFileSize().get(Size.Unit.BYTE));
+    fileCount = String.format("%,1d", load.getStoreFileCount());
+    memSize = StringUtils.byteDesc((long) load.getMemStoreSize().get(Size.Unit.BYTE));
     locality = load.getDataLocality();
   }
 
   if (addr != null) {
-    ServerLoad sl = master.getServerManager().getLoad(addr);
+    ServerMetrics sl = master.getServerManager().getLoad(addr);
     // This port might be wrong if RS actually ended up using something else.
     urlRegionServer =
         "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/";
@@ -29,8 +29,9 @@ import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
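The newly imported org.apache.hadoop.hbase.Size is the value type that replaces the raw ...MB getters throughout this patch: it pairs a double with a unit and converts on read. A quick illustrative sketch (values arbitrary):

import org.apache.hadoop.hbase.Size;

public class SizeSketch {
  public static void main(String[] args) {
    Size size = new Size(1536, Size.Unit.MEGABYTE);
    System.out.println(size.get());                      // 1536.0, in the Size's own unit
    System.out.println(size.get(Size.Unit.GIGABYTE));    // 1.5
    System.out.println((long) size.get(Size.Unit.BYTE)); // 1610612736
  }
}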
@@ -363,15 +364,16 @@ public class TestSimpleRegionNormalizer {
       getRegionServerOfRegion(any())).thenReturn(sn);
 
     for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
-      RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
-      when(regionLoad.getName()).thenReturn(region.getKey());
-      when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
+      RegionMetrics regionLoad = Mockito.mock(RegionMetrics.class);
+      when(regionLoad.getRegionName()).thenReturn(region.getKey());
+      when(regionLoad.getStoreFileSize())
+          .thenReturn(new Size(region.getValue(), Size.Unit.MEGABYTE));
 
       // this is possibly broken with jdk9, unclear if false positive or not
       // suppress it for now, fix it when we get to running tests on 9
       // see: http://errorprone.info/bugpattern/MockitoCast
       when((Object) masterServices.getServerManager().getLoad(sn).
-        getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
+        getRegionMetrics().get(region.getKey())).thenReturn(regionLoad);
     }
     try {
       when(masterRpcServices.isSplitOrMergeEnabled(any(),
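The normalizer test now mocks RegionMetrics and stubs getStoreFileSize() to return a Size. A condensed, self-contained version of that stubbing pattern (assuming Mockito is on the classpath; the region name and 10 MB figure are arbitrary):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.Size;

public class RegionMetricsMockSketch {
  public static void main(String[] args) {
    byte[] regionName = "table,,1.abcdef".getBytes();

    // Stub only the two getters the normalizer reads.
    RegionMetrics metrics = mock(RegionMetrics.class);
    when(metrics.getRegionName()).thenReturn(regionName);
    when(metrics.getStoreFileSize()).thenReturn(new Size(10, Size.Unit.MEGABYTE));

    System.out.println(metrics.getStoreFileSize().get(Size.Unit.MEGABYTE)); // 10.0
  }
}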