HBASE-2021 Add compaction details to master UI
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@895912 13f79535-47bb-0310-9956-ffa450edef68
commit 9cacb67b69
parent 95ed506323
@@ -151,6 +151,9 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
  private final ServerManager serverManager;
  private final RegionManager regionManager;

  private long lastFragmentationQuery = -1L;
  private Map<String, Integer> fragmentation = null;

  /**
   * Constructor
   * @param conf configuration

@@ -1233,6 +1236,17 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    }
  }

  public Map<String, Integer> getTableFragmentation() throws IOException {
    long now = System.currentTimeMillis();
    // only check every two minutes by default
    int check = this.conf.getInt("hbase.master.fragmentation.check.frequency", 2 * 60 * 1000);
    if (lastFragmentationQuery == -1 || now - lastFragmentationQuery > check) {
      fragmentation = FSUtils.getTableFragmentation(this);
      lastFragmentationQuery = now;
    }
    return fragmentation;
  }

  /**
   * Main program
   * @param args

@@ -1240,4 +1254,4 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
  public static void main(String [] args) {
    doMain(args, HMaster.class);
  }
}
}
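For context, a minimal caller-side sketch of the accessor added above, mirroring what the updated JSP pages do later in this patch; the in-scope "master" reference (an HMaster) and the surrounding plumbing are assumed, not part of the commit:

// Sketch only: "master" is assumed to be an HMaster reference, as in the JSPs below.
// Keys of the returned map are table names plus the special "-TOTAL-" entry,
// values are fragmentation percentages; the master refreshes the map at most
// every hbase.master.fragmentation.check.frequency milliseconds.
Map<String, Integer> frags = master.getTableFragmentation();
Integer total = (frags != null) ? frags.get("-TOTAL-") : null;
System.out.println("Overall fragmentation: " + (total != null ? total + "%" : "n/a"));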
@@ -217,4 +217,14 @@ class CompactSplitThread extends Thread implements HConstants {
      this.interrupt();
    }
  }

  /**
   * Returns the current size of the queue containing regions that are
   * waiting to be processed.
   *
   * @return The current size of the regions queue.
   */
  public int getCompactionQueueSize() {
    return compactionQueue.size();
  }
}
@@ -1010,6 +1010,8 @@ public class HRegionServer implements HConstants, HRegionInterface,
    this.metrics.storefiles.set(storefiles);
    this.metrics.memstoreSizeMB.set((int)(memstoreSize/(1024*1024)));
    this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024)));
    this.metrics.compactionQueueSize.set(compactSplitThread.
      getCompactionQueueSize());

    LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf);
    if (lruBlockCache != null) {
@@ -107,6 +107,12 @@ public class RegionServerMetrics implements Updater {
  public final MetricsIntValue memstoreSizeMB =
    new MetricsIntValue("memstoreSizeMB", registry);

  /**
   * Size of the compaction queue.
   */
  public final MetricsIntValue compactionQueueSize =
    new MetricsIntValue("compactionQueueSize", registry);

  /**
   * filesystem read latency
   */

@@ -158,7 +164,7 @@ public class RegionServerMetrics implements Updater {
    this.memstoreSizeMB.pushMetric(this.metricsRecord);
    this.regions.pushMetric(this.metricsRecord);
    this.requests.pushMetric(this.metricsRecord);

    this.compactionQueueSize.pushMetric(this.metricsRecord);
    this.blockCacheSize.pushMetric(this.metricsRecord);
    this.blockCacheFree.pushMetric(this.metricsRecord);
    this.blockCacheCount.pushMetric(this.metricsRecord);

@@ -218,6 +224,8 @@ public class RegionServerMetrics implements Updater {
      Integer.valueOf(this.storefileIndexSizeMB.get()));
    sb = Strings.appendKeyValue(sb, "memstoreSize",
      Integer.valueOf(this.memstoreSizeMB.get()));
    sb = Strings.appendKeyValue(sb, "compactionQueueSize",
      Integer.valueOf(this.compactionQueueSize.get()));
    // Duplicate from jvmmetrics because metrics are private there so
    // inaccessible.
    MemoryUsage memory =
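The hunks above wire a single number from the compaction thread into the region server metrics: CompactSplitThread exposes its queue length, HRegionServer copies it into the compactionQueueSize gauge on each metrics update, and RegionServerMetrics pushes and prints it. A self-contained sketch of that pattern, using plain Java types in place of Hadoop's MetricsIntValue (class and field names here are illustrative, not HBase code):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class CompactionQueueGaugeSketch {
  // stands in for CompactSplitThread's queue of regions awaiting compaction
  private final BlockingQueue<String> compactionQueue = new LinkedBlockingQueue<String>();
  // stands in for RegionServerMetrics.compactionQueueSize (an int-valued gauge)
  private volatile int compactionQueueSizeGauge;

  // analogue of CompactSplitThread.getCompactionQueueSize()
  int getCompactionQueueSize() {
    return compactionQueue.size();
  }

  // analogue of the periodic metrics update in HRegionServer: copy the
  // current queue length into the gauge so it can be pushed and reported
  void updateMetrics() {
    compactionQueueSizeGauge = getCompactionQueueSize();
  }
}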
@@ -23,6 +23,8 @@ import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@@ -36,6 +38,7 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -360,6 +363,98 @@ public class FSUtils {
    return true;
  }

  /**
   * Returns the overall fragmentation percentage. Includes .META. and
   * -ROOT- as well.
   *
   * @param master The master defining the HBase root and file system.
   * @return The overall fragmentation percentage, or -1 if it cannot be determined.
   * @throws IOException When scanning the directory fails.
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-").intValue() : -1;
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param master The master defining the HBase root and file system.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = master.getRootDir();
    // since HMaster.getFileSystem() is package private
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }

  /**
   * Runs through the HBase rootdir and checks how many stores for each table
   * have more than one file in them. Checks -ROOT- and .META. too. The total
   * percentage across all tables is stored under the special key "-TOTAL-".
   *
   * @param fs The file system to use.
   * @param hbaseRootDir The root directory to scan.
   * @return A map for each table and its percentage.
   * @throws IOException When scanning the directory fails.
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    DirFilter df = new DirFilter(fs);
    // presumes any directory under hbase.rootdir is a table
    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
    for (int i = 0; i < tableDirs.length; i++) {
      // Skip the .log directory. All others should be tables. Inside a table,
      // there are compaction.dir directories to skip. Otherwise, all else
      // should be regions. Then in each region, should only be family
      // directories. Under each of these, should be one file only.
      Path d = tableDirs[i].getPath();
      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
        continue;
      }
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus [] regionDirs = fs.listStatus(d, df);
      for (int j = 0; j < regionDirs.length; j++) {
        Path dd = regionDirs[j].getPath();
        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
          continue;
        }
        // else it's a region name, now look in region for families
        FileStatus [] familyDirs = fs.listStatus(dd, df);
        for (int k = 0; k < familyDirs.length; k++) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDirs[k].getPath();
          // now in family make sure only one file
          FileStatus [] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // compute percentage per table and store in result map
      frags.put(d.getName(), new Integer(
        Math.round((float) cfFrag / cfCount * 100)));
    }
    // set overall percentage for all tables
    frags.put("-TOTAL-", new Integer(
      Math.round((float) cfFragTotal / cfCountTotal * 100)));
    return frags;
  }

  /**
   * Expects to find -ROOT- directory.
   * @param fs
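The per-table number produced above is simply the rounded share of stores (column family directories) that still hold more than one file; the "-TOTAL-" entry applies the same formula across all tables. A standalone illustration of that arithmetic, with hypothetical names and counts:

public class FragmentationMath {
  // round(100 * fragmentedStores / totalStores), as in FSUtils.getTableFragmentation()
  static int fragmentationPercent(int storesWithMoreThanOneFile, int totalStores) {
    return Math.round((float) storesWithMoreThanOneFile / totalStores * 100);
  }

  public static void main(String[] args) {
    // e.g. 3 of 12 stores still hold multiple files -> reported as 25%
    System.out.println(fragmentationPercent(3, 12) + "%");
  }
}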
@@ -4,6 +4,7 @@
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.io.Text"
  import="org.apache.hadoop.hbase.util.Bytes"
  import="org.apache.hadoop.hbase.util.FSUtils"
  import="org.apache.hadoop.hbase.master.HMaster"
  import="org.apache.hadoop.hbase.HConstants"
  import="org.apache.hadoop.hbase.master.MetaRegion"

@@ -24,6 +25,7 @@
  if (interval == 0) {
    interval = 1;
  }
  Map<String, Integer> frags = master.getTableFragmentation();
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

@@ -49,6 +51,7 @@
<tr><td>HBase Root Directory</td><td><%= master.getRootDir().toString() %></td><td>Location of HBase home directory</td></tr>
<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
<tr><td>Regions On FS</td><td><%= master.getRegionManager().countRegionsOnFS() %></td><td>Number of regions on FileSystem. Rough count.</td></tr>
<tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
<tr><td>Zookeeper Quorum</td><td><%= master.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers. For more, see <a href="/zk.jsp">zk dump</a>.</td></tr>
</table>

@@ -56,11 +59,17 @@
<%
if (rootLocation != null) { %>
<table>
<tr><th>Table</th><th>Description</th></tr>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td><td>The -ROOT- table holds references to all .META. regions.</td></tr>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get("-ROOT-") != null ? frags.get("-ROOT-").intValue() + "%" : "n/a" %></td>
<td>The -ROOT- table holds references to all .META. regions.</td>
</tr>
<%
if (onlineRegions != null && onlineRegions.size() > 0) { %>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td><td>The .META. table holds references to all User Table regions</td></tr>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
<td>The .META. table holds references to all User Table regions</td>
</tr>

<% } %>
</table>

@@ -70,9 +79,12 @@
<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
if(tables != null && tables.length > 0) { %>
<table>
<tr><th>Table</th><th>Description</th></tr>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<% for(HTableDescriptor htDesc : tables ) { %>
<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td><td><%= htDesc.toString() %></td></tr>
<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td>
<td align="center"><%= frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
<td><%= htDesc.toString() %></td>
</tr>
<% } %>

<p> <%= tables.length %> table(s) in set.</p>
@@ -28,6 +28,7 @@
  master.getServerManager().getServerAddressToServerInfo();
  String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th></tr>";
  HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
  Map<String, Integer> frags = master.getTableFragmentation();
%>

<?xml version="1.0" encoding="UTF-8" ?>

@@ -121,6 +122,7 @@
<table>
<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
<tr><td>Enabled</td><td><%= hbadmin.isTableEnabled(table.getTableName()) %></td><td>Is the table enabled</td></tr>
<tr><td>Fragmentation</td><td><%= frags.get(tableName) != null ? frags.get(tableName).intValue() + "%" : "n/a" %></td><td>How fragmented is the table. After a major compaction it is 0%.</td></tr>
</table>
<%
  Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();