HBASE-3017 More log pruning
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@999150 13f79535-47bb-0310-9956-ffa450edef68
parent 0cdd2c5eb7
commit 4f0d6bc6be
@@ -682,7 +682,7 @@ public class AssignmentManager extends ZooKeeperListener {
     // Get all available servers
     List<HServerInfo> servers = serverManager.getOnlineServersList();
 
-    LOG.info("Assigning " + allRegions.size() + " across " + servers.size() +
+    LOG.info("Assigning " + allRegions.size() + " regions across " + servers.size() +
       " servers");
 
     // Generate a cluster startup region placement plan
@@ -27,11 +27,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.text.DecimalFormat;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -94,8 +94,6 @@ public class ServerManager {
 
   private final DeadServer deadservers = new DeadServer();
 
-  private static final DecimalFormat DF = new DecimalFormat("#.##");
-
   /**
    * Dumps into log current stats on dead servers and number of servers
    * TODO: Make this a metric; dump metrics into log.
@@ -112,7 +110,7 @@ public class ServerManager {
     double averageLoad = getAverageLoad();
     String deadServersList = deadservers.toString();
     LOG.info("regionservers=" + numServers +
-      ", averageload=" + DF.format(averageLoad) +
+      ", averageload=" + StringUtils.limitDecimalTo2(averageLoad) +
       ((numDeadServers > 0)? ("deadservers=" + deadServersList): ""));
   }
 }
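Aside: the swap above trades ServerManager's private DecimalFormat for Hadoop's StringUtils.limitDecimalTo2. A minimal sketch of the equivalent two-decimal formatting, using only the JDK DecimalFormat that the old code relied on (the example value and class name are illustrative, not taken from the commit):

import java.text.DecimalFormat;

public class LoadFormatSketch {
  public static void main(String[] args) {
    double averageLoad = 12.3456;
    // What the removed field produced: "#.##" keeps at most two decimals.
    DecimalFormat df = new DecimalFormat("#.##");
    System.out.println("averageload=" + df.format(averageLoad)); // averageload=12.35
    // org.apache.hadoop.util.StringUtils.limitDecimalTo2(averageLoad) is expected
    // to yield the same two-decimal string without each class keeping its own
    // static formatter (DecimalFormat is also not thread-safe).
  }
}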
@@ -197,6 +197,9 @@ class SplitTransaction {
     }
     this.journal.add(JournalEntry.OFFLINED_PARENT);
 
+    // TODO: If the below were multithreaded would we complete steps in less
+    // elapsed time? St.Ack 20100920
+
     splitStoreFiles(this.splitdir, hstoreFilesToSplit);
     // splitStoreFiles creates daughter region dirs under the parent splits dir
     // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
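The TODO added above asks whether splitting the store files on multiple threads would cut elapsed time. A hedged sketch of that idea, detached from SplitTransaction itself; splitOneFile and the String file type are hypothetical stand-ins for the real per-store-file work, not the class's actual members:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Hypothetical sketch only: fan the per-file split work out to a small pool
// instead of looping over the files serially.
class ParallelSplitSketch {
  static void splitStoreFiles(List<String> files, int threads) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    List<Future<?>> pending = new ArrayList<Future<?>>();
    for (final String f : files) {
      pending.add(pool.submit(new Runnable() {
        public void run() { splitOneFile(f); } // hypothetical per-file step
      }));
    }
    for (Future<?> p : pending) {
      p.get(); // surface the first failure, as the serial loop would
    }
    pool.shutdown();
  }

  static void splitOneFile(String file) {
    // placeholder for writing the daughter reference files for one store file
    System.out.println("splitting " + file);
  }
}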
@@ -1,6 +1,7 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.util.*"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.util.StringUtils"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.JvmVersion"
   import="org.apache.hadoop.hbase.util.FSUtils"
@@ -66,7 +67,7 @@
 <tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
 <tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
 <tr><td>HBase Root Directory</td><td><%= FSUtils.getRootDir(master.getConfiguration()).toString() %></td><td>Location of HBase home directory</td></tr>
-<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
+<tr><td>Load average</td><td><%= StringUtils.limitDecimalTo2(master.getServerManager().getAverageLoad()) %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
 <% if (showFragmentation) { %>
 <tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
 <% } %>