HBASE-1030 Bit of polish on HBASE-1018

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@722286 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-12-01 23:30:04 +00:00
parent 08eef7d821
commit 9673c4ad75
8 changed files with 178 additions and 77 deletions

View File

@@ -146,6 +146,7 @@ Release 0.19.0 - Unreleased
HBASE-1020 Regionserver OOME handler should dump vital stats
HBASE-1018 Regionservers should report detailed health to master
HBASE-1034 Remove useless TestToString unit test
HBASE-1030 Bit of polish on HBASE-1018
NEW FEATURES
HBASE-875 Use MurmurHash instead of JenkinsHash [in bloomfilters]

View File

@@ -27,13 +27,16 @@ import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
* This class encapsulates metrics for determining the load on an HRegionServer
*/
@SuppressWarnings("unused")
public class HServerLoad implements WritableComparable {
/** number of regions */
// could just use regionLoad.size() but master.RegionManager likes to play
@@ -52,7 +55,7 @@ public class HServerLoad implements WritableComparable {
/**
* Encapsulates per-region loading metrics.
*/
class RegionLoad implements Writable {
public static class RegionLoad implements Writable {
/** the region name */
private byte[] name;
/** the number of stores for the region */
@@ -185,6 +188,19 @@ public class HServerLoad implements WritableComparable {
out.writeInt(memcacheSizeMB);
out.writeInt(storefileIndexSizeMB);
}
@Override
public String toString() {
StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "stores",
Integer.valueOf(this.stores));
sb = Strings.appendKeyValue(sb, "storefiles",
Integer.valueOf(this.storefiles));
sb = Strings.appendKeyValue(sb, "memcacheSize",
Integer.valueOf(this.memcacheSizeMB));
sb = Strings.appendKeyValue(sb, "storefileIndexSize",
Integer.valueOf(this.storefileIndexSizeMB));
return sb.toString();
}
}
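For orientation, a minimal sketch of what the new RegionLoad.toString() emits, assuming the RegionLoad constructor shown later in this diff is reachable from the caller; the region name and counters are invented for the example:
// Illustrative only: values below are made up, not taken from this commit.
HServerLoad.RegionLoad rl = new HServerLoad.RegionLoad(
  Bytes.toBytes("mytable,,1228173919280"), 1, 3, 64, 2);
System.out.println(rl.toString());
// prints: stores=1, storefiles=3, memcacheSize=64, storefileIndexSize=2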
/*
@@ -256,33 +272,13 @@ public class HServerLoad implements WritableComparable {
*/
public String toString(int msgInterval) {
StringBuilder sb = new StringBuilder();
sb.append("requests: ");
sb.append(numberOfRequests/msgInterval);
sb.append(" usedHeapMB: ");
sb.append(usedHeapMB);
sb.append(" maxHeapMB: ");
sb.append(maxHeapMB);
sb.append(" regions: ");
sb.append(numberOfRegions);
Iterator<RegionLoad> i = regionLoad.iterator();
sb.append(" {");
while (i.hasNext()) {
RegionLoad rl = i.next();
sb.append(" { name: '");
sb.append(Bytes.toString(rl.name));
sb.append("' stores: ");
sb.append(rl.stores);
sb.append(" storefiles: ");
sb.append(rl.storefiles);
sb.append(" memcacheSizeMB: ");
sb.append(rl.memcacheSizeMB);
sb.append(" storefileIndexSizeMB: ");
sb.append(rl.storefileIndexSizeMB);
sb.append(" }");
if (i.hasNext())
sb.append(',');
}
sb.append(" }");
sb = Strings.appendKeyValue(sb, "requests",
Integer.valueOf(numberOfRequests/msgInterval));
sb = Strings.appendKeyValue(sb, "regions",
Integer.valueOf(numberOfRegions));
sb = Strings.appendKeyValue(sb, "usedHeap",
Integer.valueOf(this.usedHeapMB));
sb = Strings.appendKeyValue(sb, "maxHeap", Integer.valueOf(maxHeapMB));
return sb.toString();
}
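With the same helper, the server-level summary is now a single key/value line; assuming illustrative figures (120 requests over a 3-second msgInterval, 3 regions, 128 MB used of a 995 MB heap) it would read: requests=40, regions=3, usedHeap=128, maxHeap=995. The per-region detail that used to be inlined here now comes from RegionLoad.toString() above.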
@@ -379,19 +375,26 @@ public class HServerLoad implements WritableComparable {
}
/**
* @param name
* @param stores
* @param storefiles
* @param memcacheSizeMB
* @param storefileIndexSizeMB
* @param load Instance of HServerLoad.RegionLoad
*/
public void addRegionInfo(final HServerLoad.RegionLoad load) {
this.numberOfRegions++;
this.regionLoad.add(load);
}
/**
* @param name
* @param stores
* @param storefiles
* @param memcacheSizeMB
* @param storefileIndexSizeMB
* @deprecated Use {@link #addRegionInfo(RegionLoad)}
*/
public void addRegionInfo(final byte[] name, final int stores,
final int storefiles, final int memcacheSizeMB,
final int storefileIndexSizeMB) {
this.numberOfRegions++;
this.regionLoad.add(
new RegionLoad(name, stores, storefiles, memcacheSizeMB,
storefileIndexSizeMB));
this.regionLoad.add(new HServerLoad.RegionLoad(name, stores, storefiles,
memcacheSizeMB, storefileIndexSizeMB));
}
// Writable
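A hedged usage sketch of the API change above; hsl, the region name, and the counters are assumed to be in scope and are not part of this diff:
// Preferred after this change: hand over a ready-made RegionLoad.
hsl.addRegionInfo(new HServerLoad.RegionLoad(name, stores, storefiles,
  memcacheSizeMB, storefileIndexSizeMB));
// Still compiles, but is now deprecated in favour of the overload above.
hsl.addRegionInfo(name, stores, storefiles, memcacheSizeMB, storefileIndexSizeMB);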

View File

@@ -29,12 +29,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.onelab.filter.Key;
/**
* Hbase customizations of MapFile.

View File

@@ -326,25 +326,11 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
doMetrics();
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
HServerLoad hsl = new HServerLoad(requestCount.get(),
(int)(memory.getUsed()/1024/1024),
(int)(memory.getMax()/1024/1024));
for (HRegion r: onlineRegions.values()) {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
int memcacheSizeMB = (int)(r.memcacheSize.get()/1024/1024);
int storefileIndexSizeMB = 0;
synchronized (r.stores) {
stores += r.stores.size();
for (HStore store: r.stores.values()) {
storefiles += store.getStorefilesCount();
storefileIndexSizeMB +=
(int)(store.getStorefilesIndexSize()/1024/1024);
}
}
hsl.addRegionInfo(name, stores, storefiles, memcacheSizeMB,
storefileIndexSizeMB);
hsl.addRegionInfo(createRegionLoad(r));
}
this.serverInfo.setLoad(hsl);
this.requestCount.set(0);
@@ -579,7 +565,41 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
throw ex;
}
}
/*
* @param r Region to get RegionLoad for.
* @return RegionLoad instance.
* @throws IOException
*/
private HServerLoad.RegionLoad createRegionLoad(final HRegion r)
throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
int memcacheSizeMB = (int)(r.memcacheSize.get()/1024/1024);
int storefileIndexSizeMB = 0;
synchronized (r.stores) {
stores += r.stores.size();
for (HStore store: r.stores.values()) {
storefiles += store.getStorefilesCount();
storefileIndexSizeMB +=
(int)(store.getStorefilesIndexSize()/1024/1024);
}
}
return new HServerLoad.RegionLoad(name, stores, storefiles, memcacheSizeMB,
storefileIndexSizeMB);
}
/**
* @param regionName
* @return An instance of RegionLoad.
* @throws IOException
*/
public HServerLoad.RegionLoad createRegionLoad(final byte [] regionName)
throws IOException {
return createRegionLoad(this.onlineRegions.get(Bytes.mapKey(regionName)));
}
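As a worked illustration of the aggregation above (figures invented): a region with two stores holding 3 and 1 storefiles, a 70 MB memcache, and per-store index sizes of about 2 MB and 1 MB yields a RegionLoad of stores=2, storefiles=4, memcacheSizeMB=70, storefileIndexSizeMB=3. The index size is converted to whole megabytes per store before summing, so a store whose index is under 1 MB contributes 0 to the total.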
/*
* Check if an OOME and if so, call abort.
* @param e

View File

@@ -17,8 +17,13 @@
*/
package org.apache.hadoop.hbase.regionserver.metrics;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
@@ -38,6 +43,7 @@ public class RegionServerMetrics implements Updater {
private final Log LOG = LogFactory.getLog(this.getClass());
private final MetricsRecord metricsRecord;
private long lastUpdate = System.currentTimeMillis();
private static final int MB = 1024*1024;
/**
* Count of regions carried by this regionserver
@@ -77,6 +83,7 @@ public class RegionServerMetrics implements Updater {
String name = Thread.currentThread().getName();
metricsRecord.setTag("RegionServer", name);
context.registerUpdater(this);
// Add jvmmetrics.
JvmMetrics.init("RegionServer", name);
LOG.info("Initialized");
}
@@ -129,24 +136,30 @@ public class RegionServerMetrics implements Updater {
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("requests=");
int seconds = (int)((System.currentTimeMillis() - this.lastUpdate)/1000);
if (seconds == 0) {
seconds = 1;
}
sb.append(this.requests.get()/seconds);
sb.append(", regions=");
sb.append(this.regions.get());
sb.append(", stores=");
sb.append(this.stores.get());
sb.append(", storefiles=");
sb.append(this.storefiles.get());
sb.append(", storefileIndexSize=");
sb.append(this.storefileIndexSizeMB.get());
sb.append("MB");
sb.append(", memcacheSize=");
sb.append(this.memcacheSizeMB.get());
sb.append("MB");
sb = Strings.appendKeyValue(sb, "request",
Integer.valueOf(this.requests.get()/seconds));
sb = Strings.appendKeyValue(sb, "regions",
Integer.valueOf(this.regions.get()));
sb = Strings.appendKeyValue(sb, "stores",
Integer.valueOf(this.stores.get()));
sb = Strings.appendKeyValue(sb, "storefiles",
Integer.valueOf(this.storefiles.get()));
sb = Strings.appendKeyValue(sb, "storefileIndexSize",
Integer.valueOf(this.storefileIndexSizeMB.get()));
sb = Strings.appendKeyValue(sb, "memcacheSize",
Integer.valueOf(this.memcacheSizeMB.get()));
// Duplicated from jvmmetrics because the metrics there are private and
// so inaccessible from here.
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
sb = Strings.appendKeyValue(sb, "usedHeap",
Long.valueOf(memory.getUsed()/MB));
sb = Strings.appendKeyValue(sb, "maxHeap",
Long.valueOf(memory.getMax()/MB));
return sb.toString();
}
}
}
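For reference, with the keys above the regionserver metrics line would now render roughly as (numbers invented): request=40, regions=3, stores=4, storefiles=8, storefileIndexSize=1, memcacheSize=64, usedHeap=128, maxHeap=995. Sizes are whole megabytes, and the request figure is the request count divided by the seconds since the last update, with the divisor clamped to at least 1.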

View File

@@ -0,0 +1,61 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
/**
* Utility for Strings.
*/
public class Strings {
public final static String DEFAULT_SEPARATOR = "=";
public final static String DEFAULT_KEYVALUE_SEPARATOR = ", ";
/**
* Append a key/value pair to the passed StringBuilder.
* Uses the default separators.
* @param sb StringBuilder to use
* @param key Key to append.
* @param value Value to append.
* @return Passed <code>sb</code> populated with key/value.
*/
public static StringBuilder appendKeyValue(final StringBuilder sb,
final String key, final Object value) {
return appendKeyValue(sb, key, value, DEFAULT_SEPARATOR,
DEFAULT_KEYVALUE_SEPARATOR);
}
/**
* Append a key/value pair to the passed StringBuilder.
* Uses the passed separators.
* @param sb StringBuilder to use
* @param key Key to append.
* @param value Value to append.
* @param separator Value to use between key and value.
* @param keyValueSeparator Value to use between key/value pairs.
* @return Passed <code>sb</code> populated with key/value.
*/
public static StringBuilder appendKeyValue(final StringBuilder sb,
final String key, final Object value, final String separator,
final String keyValueSeparator) {
if (sb.length() > 0) {
sb.append(keyValueSeparator);
}
return sb.append(key).append(separator).append(value);
}
}
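A minimal usage sketch of the new helper (values are illustrative):
StringBuilder sb = new StringBuilder();
sb = Strings.appendKeyValue(sb, "stores", Integer.valueOf(2));
sb = Strings.appendKeyValue(sb, "storefiles", Integer.valueOf(5));
System.out.println(sb.toString()); // prints: stores=2, storefiles=5
The key/value separator is only emitted once the builder is non-empty, so callers can chain calls without ending up with a leading comma.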

View File

@@ -96,7 +96,7 @@
%>
<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
<% } %>
<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests: <%= totalRequests %> regions: <%= totalRegions %></td></tr>
<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
</table>
<p>Load is requests per second and count of regions loaded</p>
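For example, assuming the default hbase.regionserver.msginterval of 3000 ms used elsewhere in this change, the interval is 3 seconds, so a server that handled 300 requests since its last report shows requests=100 in the load column above.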

View File

@@ -7,12 +7,14 @@
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.HServerInfo"
import="org.apache.hadoop.hbase.HServerLoad"
import="org.apache.hadoop.hbase.HRegionInfo" %><%
HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER);
HServerInfo serverInfo = regionServer.getServerInfo();
RegionServerMetrics metrics = regionServer.getMetrics();
Collection<HRegionInfo> onlineRegions = regionServer.getSortedOnlineRegionInfos();
int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
@@ -34,16 +36,20 @@
<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
<tr><td>Metrics</td><td><%= metrics.toString() %></td><td>RegionServer Metrics</td></tr>
<tr><td>Metrics</td><td><%= metrics.toString() %></td><td>RegionServer Metrics; file and heap sizes are in megabytes</td></tr>
</table>
<h2>Online Regions</h2>
<% if (onlineRegions != null && onlineRegions.size() > 0) { %>
<table>
<tr><th>Region Name</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th></tr>
<% for (HRegionInfo r: onlineRegions) { %>
<tr><th>Region Name</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th><th>Metrics</th></tr>
<% for (HRegionInfo r: onlineRegions) {
HServerLoad.RegionLoad load = regionServer.createRegionLoad(r.getRegionName());
%>
<tr><td><%= r.getRegionNameAsString() %></td><td><%= r.getEncodedName() %></td>
<td><%= Bytes.toString(r.getStartKey()) %></td><td><%= Bytes.toString(r.getEndKey()) %></td></tr>
<td><%= Bytes.toString(r.getStartKey()) %></td><td><%= Bytes.toString(r.getEndKey()) %></td>
<td><%= load.toString() %></td>
</tr>
<% } %>
</table>
<p>Region names are made of the containing table's name, a comma,