HBASE-4276 AssignmentManager debug logs should be at INFO level for META/ROOT regions.

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1163400 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2011-08-30 21:28:53 +00:00
parent b767480a19
commit be00c74833
3 changed files with 29 additions and 12 deletions

View File

@ -518,6 +518,8 @@ Release 0.90.5 - Unreleased
HBASE-4222 Make HLog more resilient to write pipeline failures
HBASE-4293 More verbose logging in ServerShutdownHandler for meta/root
cases (todd)
HBASE-4276 AssignmentManager debug logs should be at INFO level for
META/ROOT regions (todd)
Release 0.90.4 - August 10, 2011

View File

@ -324,6 +324,7 @@ public class AssignmentManager extends ZooKeeperListener {
boolean intransistion =
processRegionInTransition(hri.getEncodedName(), hri, null);
if (!intransistion) return intransistion;
debugLog(hri, "Waiting on " + HRegionInfo.prettyPrint(hri.getEncodedName()));
synchronized(this.regionsInTransition) {
while (!this.master.isStopped() &&
this.regionsInTransition.containsKey(hri.getEncodedName())) {
@ -444,7 +445,7 @@ public class AssignmentManager extends ZooKeeperListener {
throws KeeperException {
// If was on dead server, its closed now. Force to OFFLINE and then
// handle it like a close; this will get it reassigned if appropriate
LOG.debug("RIT " + hri.getEncodedName() + " in state=" +
debugLog(hri, "RIT " + hri.getEncodedName() + " in state=" +
oldData.getEventType() + " was on deadserver; forcing offline");
ZKAssign.createOrForceNodeOffline(this.watcher, hri,
this.master.getServerName());
@ -1237,7 +1238,8 @@ public class AssignmentManager extends ZooKeeperListener {
RegionPlan plan = getRegionPlan(state, forceNewPlan);
if (plan == null) return; // Should get reassigned later when RIT times out.
try {
LOG.debug("Assigning region " + state.getRegion().getRegionNameAsString() +
debugLog(state.getRegion(),
"Assigning region " + state.getRegion().getRegionNameAsString() +
" to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
state.update(RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
@ -1290,6 +1292,14 @@ public class AssignmentManager extends ZooKeeperListener {
}
}
/**
 * Logs the given message, promoting it to INFO when it concerns a
 * META/ROOT region so those transitions are visible at default log
 * levels; all other regions stay at DEBUG.
 *
 * @param region region the message is about; decides the log level
 * @param string the message to emit
 */
private void debugLog(HRegionInfo region, String string) {
  // Guard clause: catalog regions are important enough for INFO.
  if (region.isMetaRegion()) {
    LOG.info(string);
    return;
  }
  LOG.debug(string);
}
/**
* Set region as OFFLINED up in zookeeper
* @param state
@ -1384,7 +1394,7 @@ public class AssignmentManager extends ZooKeeperListener {
}
}
if (newPlan) {
LOG.debug("No previous transition plan was found (or we are ignoring " +
debugLog(state.getRegion(), "No previous transition plan was found (or we are ignoring " +
"an existing plan) for " + state.getRegion().getRegionNameAsString() +
" so generated a random one; " + randomPlan + "; " +
serverManager.countOfRegionServers() +
@ -1392,7 +1402,7 @@ public class AssignmentManager extends ZooKeeperListener {
", exclude=" + serverToExclude + ") available servers");
return randomPlan;
}
LOG.debug("Using pre-existing plan for region " +
debugLog(state.getRegion(), "Using pre-existing plan for region " +
state.getRegion().getRegionNameAsString() + "; plan=" + existingPlan);
return existingPlan;
}
@ -1421,12 +1431,12 @@ public class AssignmentManager extends ZooKeeperListener {
* @param force if region should be closed even if already closing
*/
public void unassign(HRegionInfo region, boolean force) {
LOG.debug("Starting unassignment of region " +
debugLog(region, "Starting unassignment of region " +
region.getRegionNameAsString() + " (offlining)");
synchronized (this.regions) {
// Check if this region is currently assigned
if (!regions.containsKey(region)) {
LOG.debug("Attempted to unassign region " +
debugLog(region, "Attempted to unassign region " +
region.getRegionNameAsString() + " but it is not " +
"currently assigned anywhere");
return;
@ -1452,12 +1462,12 @@ public class AssignmentManager extends ZooKeeperListener {
} else if (force && state.isPendingClose()) {
// JD 05/25/11
// in my experience this is useless, when this happens it just spins
LOG.debug("Attempting to unassign region " +
debugLog(region, "Attempting to unassign region " +
region.getRegionNameAsString() + " which is already pending close "
+ "but forcing an additional close");
state.update(RegionState.State.PENDING_CLOSE);
} else {
LOG.debug("Attempting to unassign region " +
debugLog(region, "Attempting to unassign region " +
region.getRegionNameAsString() + " but it is " +
"already in transition (" + state.getState() + ")");
return;
@ -1472,12 +1482,12 @@ public class AssignmentManager extends ZooKeeperListener {
// TODO: We should consider making this look more like it does for the
// region open where we catch all throwables and never abort
if (serverManager.sendRegionClose(server, state.getRegion())) {
LOG.debug("Sent CLOSE to " + server + " for region " +
debugLog(region, "Sent CLOSE to " + server + " for region " +
region.getRegionNameAsString());
return;
}
// This never happens. Currently regionserver close always return true.
LOG.debug("Server " + server + " region CLOSE RPC returned false for " +
LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
region.getEncodedName());
} catch (NotServingRegionException nsre) {
LOG.info("Server " + server + " returned " + nsre + " for " +

View File

@ -114,8 +114,13 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf
+ "this table is disabled, triggering close of region");
assignmentManager.unassign(regionInfo);
} else {
LOG.debug("Opened region " + regionInfo.getRegionNameAsString() +
" on " + this.sn.toString());
String msg = "Opened region " + regionInfo.getRegionNameAsString() +
" on " + this.sn.toString();
if (regionInfo.isMetaRegion()) {
LOG.info(msg);
} else {
LOG.debug(msg);
}
}
}
}