Revert previous commit of HBASE-4276

Realized after committing that the conditions should check
isMetaTable || isRootRegion, not just isMetaTable. Will recommit
a fixed version.


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1163408 13f79535-47bb-0310-9956-ffa450edef68
commit 261fcbdd87
parent 9f5580eba2
Author: Todd Lipcon
Date: 2011-08-30 21:38:47 +00:00
3 changed files with 12 additions and 29 deletions
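
The reverted helper (removed in the AssignmentManager.java hunks below) gated META/ROOT messages on isMetaRegion() alone. A minimal sketch of the fix the commit message describes, assuming HRegionInfo's isMetaTable() and isRootRegion() predicates and the class's existing LOG field; the actual recommit may differ:

    // Sketch only, per the commit message above: route META/ROOT region
    // messages to INFO, everything else to DEBUG. Assumes HRegionInfo
    // exposes isMetaTable() and isRootRegion(), and that LOG is the
    // AssignmentManager logger.
    private void debugLog(HRegionInfo region, String string) {
      if (region.isMetaTable() || region.isRootRegion()) {
        // META/ROOT transitions are rare but operationally important,
        // so surface them even when DEBUG logging is disabled.
        LOG.info(string);
      } else {
        LOG.debug(string);
      }
    }

Keeping the level switch in one helper leaves every call site a single line, which is why the revert below touches so many LOG.debug lines.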

CHANGES.txt

@@ -518,8 +518,6 @@ Release 0.90.5 - Unreleased
    HBASE-4222  Make HLog more resilient to write pipeline failures
    HBASE-4293  More verbose logging in ServerShutdownHandler for meta/root
                cases (todd)
-   HBASE-4276  AssignmentManager debug logs should be at INFO level for
-               META/ROOT regions (todd)
 
 Release 0.90.4 - August 10, 2011

AssignmentManager.java

@@ -324,7 +324,6 @@ public class AssignmentManager extends ZooKeeperListener {
     boolean intransistion =
       processRegionInTransition(hri.getEncodedName(), hri, null);
     if (!intransistion) return intransistion;
-    debugLog(hri, "Waiting on " + HRegionInfo.prettyPrint(hri.getEncodedName()));
     synchronized(this.regionsInTransition) {
       while (!this.master.isStopped() &&
         this.regionsInTransition.containsKey(hri.getEncodedName())) {
@@ -445,7 +444,7 @@ public class AssignmentManager extends ZooKeeperListener {
       throws KeeperException {
     // If was on dead server, its closed now. Force to OFFLINE and then
     // handle it like a close; this will get it reassigned if appropriate
-    debugLog(hri, "RIT " + hri.getEncodedName() + " in state=" +
+    LOG.debug("RIT " + hri.getEncodedName() + " in state=" +
       oldData.getEventType() + " was on deadserver; forcing offline");
     ZKAssign.createOrForceNodeOffline(this.watcher, hri,
       this.master.getServerName());
@@ -1238,8 +1237,7 @@ public class AssignmentManager extends ZooKeeperListener {
     RegionPlan plan = getRegionPlan(state, forceNewPlan);
     if (plan == null) return; // Should get reassigned later when RIT times out.
     try {
-      debugLog(state.getRegion(),
-        "Assigning region " + state.getRegion().getRegionNameAsString() +
+      LOG.debug("Assigning region " + state.getRegion().getRegionNameAsString() +
         " to " + plan.getDestination().toString());
       // Transition RegionState to PENDING_OPEN
       state.update(RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
@@ -1292,14 +1290,6 @@ public class AssignmentManager extends ZooKeeperListener {
     }
   }
-
-  private void debugLog(HRegionInfo region, String string) {
-    if (region.isMetaRegion()) {
-      LOG.info(string);
-    } else {
-      LOG.debug(string);
-    }
-  }
   /**
    * Set region as OFFLINED up in zookeeper
    * @param state
@@ -1394,7 +1384,7 @@ public class AssignmentManager extends ZooKeeperListener {
       }
     }
     if (newPlan) {
-      debugLog(state.getRegion(), "No previous transition plan was found (or we are ignoring " +
+      LOG.debug("No previous transition plan was found (or we are ignoring " +
         "an existing plan) for " + state.getRegion().getRegionNameAsString() +
         " so generated a random one; " + randomPlan + "; " +
         serverManager.countOfRegionServers() +
@@ -1402,7 +1392,7 @@ public class AssignmentManager extends ZooKeeperListener {
         ", exclude=" + serverToExclude + ") available servers");
       return randomPlan;
     }
-    debugLog(state.getRegion(), "Using pre-existing plan for region " +
+    LOG.debug("Using pre-existing plan for region " +
       state.getRegion().getRegionNameAsString() + "; plan=" + existingPlan);
     return existingPlan;
   }
@@ -1431,12 +1421,12 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param force if region should be closed even if already closing
    */
   public void unassign(HRegionInfo region, boolean force) {
-    debugLog(region, "Starting unassignment of region " +
+    LOG.debug("Starting unassignment of region " +
       region.getRegionNameAsString() + " (offlining)");
     synchronized (this.regions) {
       // Check if this region is currently assigned
       if (!regions.containsKey(region)) {
-        debugLog(region, "Attempted to unassign region " +
+        LOG.debug("Attempted to unassign region " +
           region.getRegionNameAsString() + " but it is not " +
           "currently assigned anywhere");
         return;
@@ -1462,12 +1452,12 @@ public class AssignmentManager extends ZooKeeperListener {
       } else if (force && state.isPendingClose()) {
         // JD 05/25/11
         // in my experience this is useless, when this happens it just spins
-        debugLog(region, "Attempting to unassign region " +
+        LOG.debug("Attempting to unassign region " +
           region.getRegionNameAsString() + " which is already pending close "
           + "but forcing an additional close");
         state.update(RegionState.State.PENDING_CLOSE);
       } else {
-        debugLog(region, "Attempting to unassign region " +
+        LOG.debug("Attempting to unassign region " +
           region.getRegionNameAsString() + " but it is " +
           "already in transition (" + state.getState() + ")");
         return;
@@ -1482,12 +1472,12 @@ public class AssignmentManager extends ZooKeeperListener {
       // TODO: We should consider making this look more like it does for the
       // region open where we catch all throwables and never abort
       if (serverManager.sendRegionClose(server, state.getRegion())) {
-        debugLog(region, "Sent CLOSE to " + server + " for region " +
+        LOG.debug("Sent CLOSE to " + server + " for region " +
           region.getRegionNameAsString());
         return;
       }
       // This never happens. Currently regionserver close always return true.
-      LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
+      LOG.debug("Server " + server + " region CLOSE RPC returned false for " +
         region.getEncodedName());
     } catch (NotServingRegionException nsre) {
       LOG.info("Server " + server + " returned " + nsre + " for " +

OpenedRegionHandler.java

@@ -114,13 +114,8 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInfo
           + "this table is disabled, triggering close of region");
         assignmentManager.unassign(regionInfo);
       } else {
-        String msg = "Opened region " + regionInfo.getRegionNameAsString() +
-          " on " + this.sn.toString();
-        if (regionInfo.isMetaRegion()) {
-          LOG.info(msg);
-        } else {
-          LOG.debug(msg);
-        }
+        LOG.debug("Opened region " + regionInfo.getRegionNameAsString() +
+          " on " + this.sn.toString());
       }
     }
   }