HBASE-3017 More log pruning

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@999150 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-09-20 22:31:19 +00:00
parent 0cdd2c5eb7
commit 4f0d6bc6be
4 changed files with 11 additions and 9 deletions


@@ -682,19 +682,19 @@ public class AssignmentManager extends ZooKeeperListener {
     // Get all available servers
     List<HServerInfo> servers = serverManager.getOnlineServersList();
-    LOG.info("Assigning " + allRegions.size() + " across " + servers.size() +
+    LOG.info("Assigning " + allRegions.size() + " regions across " + servers.size() +
       " servers");
     // Generate a cluster startup region placement plan
     Map<HServerInfo,List<HRegionInfo>> bulkPlan =
       LoadBalancer.bulkAssignment(allRegions, servers);
     // For each server, create OFFLINE nodes and send OPEN RPCs
-    for(Map.Entry<HServerInfo,List<HRegionInfo>> entry : bulkPlan.entrySet()) {
+    for (Map.Entry<HServerInfo,List<HRegionInfo>> entry : bulkPlan.entrySet()) {
       HServerInfo server = entry.getKey();
       List<HRegionInfo> regions = entry.getValue();
       LOG.debug("Assigning " + regions.size() + " regions to " + server);
-      for(HRegionInfo region : regions) {
+      for (HRegionInfo region : regions) {
        LOG.debug("Assigning " + region.getRegionNameAsString() + " to " + server);
        String regionName = region.getEncodedName();
        RegionPlan plan = new RegionPlan(region, null,server);
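For context on what this hunk logs: the startup path asks the LoadBalancer for a bulk placement plan (a map from each server to the regions it should open) and then walks that map, creating an OFFLINE node and sending an OPEN RPC per region. Below is a minimal, illustrative sketch of what such a plan builder does; it uses plain strings in place of HServerInfo and HRegionInfo and a simple round-robin policy, so it is an assumption-level illustration rather than the actual LoadBalancer.bulkAssignment implementation.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: distribute region names across server names round-robin,
// roughly the shape of a cluster-startup bulk assignment plan.
public class RoundRobinPlan {
  public static Map<String, List<String>> bulkAssignment(
      List<String> regions, List<String> servers) {
    Map<String, List<String>> plan = new HashMap<String, List<String>>();
    for (String server : servers) {
      plan.put(server, new ArrayList<String>());
    }
    int i = 0;
    for (String region : regions) {
      // Rotate through the servers so each ends up with roughly
      // regions.size() / servers.size() regions.
      String server = servers.get(i++ % servers.size());
      plan.get(server).add(region);
    }
    return plan;
  }
}

The caller would then iterate the returned map exactly as the hunk above does: log the per-server count at DEBUG, then issue the per-region assignments.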


@@ -27,11 +27,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.text.DecimalFormat;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -94,8 +94,6 @@ public class ServerManager {
   private final DeadServer deadservers = new DeadServer();
-  private static final DecimalFormat DF = new DecimalFormat("#.##");
   /**
    * Dumps into log current stats on dead servers and number of servers
    * TODO: Make this a metric; dump metrics into log.
@@ -112,7 +110,7 @@ public class ServerManager {
     double averageLoad = getAverageLoad();
     String deadServersList = deadservers.toString();
     LOG.info("regionservers=" + numServers +
-      ", averageload=" + DF.format(averageLoad) +
+      ", averageload=" + StringUtils.limitDecimalTo2(averageLoad) +
       ((numDeadServers > 0)? ("deadservers=" + deadServersList): ""));
   }
 }
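The substance of this file's change is swapping ServerManager's private static DecimalFormat for Hadoop's existing org.apache.hadoop.util.StringUtils.limitDecimalTo2 helper, which renders a double with at most two decimal places; that drops an import and a field and matches the rendering used on the master status page below. A small, self-contained check of the equivalence (the sample value 12.3456 is just an example):

import java.text.DecimalFormat;
import org.apache.hadoop.util.StringUtils;

// Both lines should print 12.35: the old DecimalFormat("#.##") pattern and
// Hadoop's limitDecimalTo2 helper format a double to at most two decimals.
public class LoadFormatDemo {
  public static void main(String[] args) {
    double averageLoad = 12.3456;
    System.out.println(new DecimalFormat("#.##").format(averageLoad));
    System.out.println(StringUtils.limitDecimalTo2(averageLoad));
  }
}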


@@ -196,6 +196,9 @@ class SplitTransaction {
       services.removeFromOnlineRegions(this.parent.getRegionInfo().getEncodedName());
     }
     this.journal.add(JournalEntry.OFFLINED_PARENT);
+    // TODO: If the below were multithreaded would we complete steps in less
+    // elapsed time? St.Ack 20100920
     splitStoreFiles(this.splitdir, hstoreFilesToSplit);
     // splitStoreFiles creates daughter region dirs under the parent splits dir
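The new TODO asks whether splitting the store files concurrently, rather than in a serial loop, would shorten a region split's elapsed time. A generic sketch of that idea follows; it is not what the patch does, and StoreFileSplitter and splitOneFile are hypothetical names, not HBase API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hypothetical: run one task per store file on a small pool and wait for all
// of them, so the mostly I/O-bound split work overlaps instead of serializing.
public class ParallelSplitSketch {
  interface StoreFileSplitter {
    void splitOneFile(String storeFilePath) throws Exception;
  }

  static void splitStoreFiles(final StoreFileSplitter splitter,
      List<String> storeFiles, int threads) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    try {
      List<Future<Void>> results = new ArrayList<Future<Void>>();
      for (final String file : storeFiles) {
        results.add(pool.submit(new Callable<Void>() {
          public Void call() throws Exception {
            splitter.splitOneFile(file);
            return null;
          }
        }));
      }
      for (Future<Void> f : results) {
        f.get();  // propagate the first failure, if any
      }
    } finally {
      pool.shutdown();
    }
  }
}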


@@ -1,6 +1,7 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.util.*"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.util.StringUtils"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.JvmVersion"
   import="org.apache.hadoop.hbase.util.FSUtils"
@@ -66,7 +67,7 @@
 <tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
 <tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
 <tr><td>HBase Root Directory</td><td><%= FSUtils.getRootDir(master.getConfiguration()).toString() %></td><td>Location of HBase home directory</td></tr>
-<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
+<tr><td>Load average</td><td><%= StringUtils.limitDecimalTo2(master.getServerManager().getAverageLoad()) %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
 <% if (showFragmentation) { %>
 <tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
 <% } %>