HBASE-5141 Memory leak in MonitoredRPCHandlerImpl -- REVERT. OVER-COMMITTED. REVERTING ALL SO CAN REDO COMMIT
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1228747 13f79535-47bb-0310-9956-ffa450edef68
parent 25d465426a
commit 45b7b8955b
src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
@@ -1343,7 +1343,6 @@ public abstract class HBaseServer implements RpcServer {
             errorClass, error);
           }
           call.sendResponseIfReady();
-          status.markComplete("Sent response");
         } catch (InterruptedException e) {
           if (running) { // unexpected -- log it
             LOG.info(getName() + " caught: " +
src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -711,13 +711,13 @@ public class AssignmentManager extends ZooKeeperListener {
           // Handle CLOSED by assigning elsewhere or stopping if a disable
           // If we got here all is good. Need to update RegionState -- else
           // what follows will fail because not in expected state.
-          regionState.update(RegionState.State.CLOSED, data.getStamp(),
-            data.getOrigin());
-          removeClosedRegion(regionState.getRegion());
-          this.executorService.submit(new ClosedRegionHandler(master, this,
-            regionState.getRegion()));
+          regionState.update(RegionState.State.CLOSED,
+              data.getStamp(), data.getOrigin());
+          removeClosedRegion(regionState.getRegion());
+          this.executorService.submit(new ClosedRegionHandler(master,
+              this, regionState.getRegion()));
           break;

         case RS_ZK_REGION_FAILED_OPEN:
           if (regionState == null ||
               (!regionState.isPendingOpen() && !regionState.isOpening())) {
@@ -1193,7 +1193,8 @@ public class AssignmentManager extends ZooKeeperListener {
         region.getRegionNameAsString());
       return;
     }
-    RegionState state = addToRegionsInTransition(region, hijack);
+    RegionState state = addToRegionsInTransition(region,
+        hijack);
     synchronized (state) {
       assign(region, state, setOfflineInZK, forceNewPlan, hijack);
     }
@@ -1392,8 +1393,9 @@ public class AssignmentManager extends ZooKeeperListener {
       this.regionsInTransition.put(encodedName, state);
     } else {
       // If we are reassigning the node do not force in-memory state to OFFLINE.
-      // Based on the znode state we will decide if to change in-memory state to
-      // OFFLINE or not. It will be done before setting znode to OFFLINE state.
+      // Based on the znode state we will decide if to change
+      // in-memory state to OFFLINE or not. It will
+      // be done before setting the znode to OFFLINE state.
       if (!hijack) {
         LOG.debug("Forcing OFFLINE; was=" + state);
         state.update(RegionState.State.OFFLINE);
@@ -1417,7 +1419,8 @@ public class AssignmentManager extends ZooKeeperListener {
     if (setOfflineInZK) {
       // get the version of the znode after setting it to OFFLINE.
       // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
-      versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
+      versionOfOfflineNode = setOfflineInZooKeeper(state,
+          hijack);
       if(versionOfOfflineNode != -1){
         if (isDisabledorDisablingRegionInRIT(region)) {
           return;
@@ -1526,7 +1529,8 @@ public class AssignmentManager extends ZooKeeperListener {
    * @return the version of the offline node if setting of the OFFLINE node was
    *         successful, -1 otherwise.
    */
-  int setOfflineInZooKeeper(final RegionState state, boolean hijack) {
+  int setOfflineInZooKeeper(final RegionState state,
+      boolean hijack) {
     // In case of reassignment the current state in memory need not be
     // OFFLINE.
     if (!hijack && !state.isClosed() && !state.isOffline()) {
@@ -1739,7 +1743,7 @@ public class AssignmentManager extends ZooKeeperListener {
       region.getRegionNameAsString() + " (offlining)");
     synchronized (this.regions) {
       // Check if this region is currently assigned
-      if (!this.regions.containsKey(region)) {
+      if (!regions.containsKey(region)) {
         LOG.debug("Attempted to unassign region " +
           region.getRegionNameAsString() + " but it is not " +
           "currently assigned anywhere");
@@ -2714,7 +2718,6 @@ public class AssignmentManager extends ZooKeeperListener {

     return matchAM;
   }
-
   /**
    * Process shutdown server removing any assignments.
    * @param sn Server that went down.
src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
@@ -98,7 +98,6 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInfo
     }
     // ZK Node is in CLOSED state, assign it.
     assignmentManager.setOffline(regionInfo);
     // This below has to do w/ online enable/disable of a table
-    assignmentManager.removeClosedRegion(regionInfo);
     assignmentManager.assign(regionInfo, true);
   }
src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -53,6 +53,7 @@ public class ServerShutdownHandler extends EventHandler {
   private final ServerName serverName;
   private final MasterServices services;
   private final DeadServer deadServers;
+  private final boolean shouldSplitHlog; // whether to split HLog or not

   public ServerShutdownHandler(final Server server, final MasterServices services,
       final DeadServer deadServers, final ServerName serverName,
@@ -72,6 +73,7 @@ public class ServerShutdownHandler extends EventHandler {
     if (!this.deadServers.contains(this.serverName)) {
       LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
     }
+    this.shouldSplitHlog = shouldSplitHlog;
   }

   @Override
@@ -184,7 +186,7 @@ public class ServerShutdownHandler extends EventHandler {
       LOG.info("Server " + serverName +
         " was carrying ROOT. Trying to assign.");
       this.services.getAssignmentManager().
-        regionOffline(HRegionInfo.ROOT_REGIONINFO);
+          regionOffline(HRegionInfo.ROOT_REGIONINFO);
       verifyAndAssignRootWithRetries();
     }
@@ -193,7 +195,7 @@ public class ServerShutdownHandler extends EventHandler {
       LOG.info("Server " + serverName +
         " was carrying META. Trying to assign.");
       this.services.getAssignmentManager().
-        regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
+          regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
       this.services.getAssignmentManager().assignMeta();
     }
@@ -226,8 +228,9 @@ public class ServerShutdownHandler extends EventHandler {
     // OFFLINE? -- and then others after like CLOSING that depend on log
     // splitting.
     List<RegionState> regionsInTransition =
-      this.services.getAssignmentManager().
-      processServerShutdown(this.serverName);
+      this.services.getAssignmentManager()
+        .processServerShutdown(this.serverName);
+

     // Wait on meta to come online; we need it to progress.
     // TODO: Best way to hold strictly here? We should build this retry logic
@@ -264,8 +267,8 @@ public class ServerShutdownHandler extends EventHandler {
     for (RegionState rit : regionsInTransition) {
       if (!rit.isClosing() && !rit.isPendingClose()) {
         LOG.debug("Removed " + rit.getRegion().getRegionNameAsString() +
-          " from list of regions to assign because in RIT; region state: " +
-          rit.getState());
+          " from list of regions to assign because in RIT" + " region state: "
+          + rit.getState());
         if (hris != null) hris.remove(rit.getRegion());
       }
     }
src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -217,13 +217,6 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
     this.remotePort = remotePort;
   }

-  @Override
-  public void markComplete(String status) {
-    super.markComplete(status);
-    this.params = null;
-    this.packet = null;
-  }
-
   public synchronized Map<String, Object> toMap() {
     // only include RPC info if the Handler is actively servicing an RPC call
     Map<String, Object> map = super.toMap();
src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
@@ -53,8 +53,6 @@ public class TestAssignmentManager {
   private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
   private static final ServerName RANDOM_SERVERNAME =
     new ServerName("example.org", 1234, 5678);
-  private static final ServerName SRC_SERVERNAME =
-    new ServerName("example.org", 5678, 5678);
   private Server server;
   private ServerManager serverManager;
   private CatalogTracker ct;
@@ -96,25 +94,6 @@ public class TestAssignmentManager {
     if (this.watcher != null) this.watcher.close();
   }

-  @Test
-  public void testBalanceAndShutdownServerAtSameTime()
-  throws IOException, KeeperException {
-    // Region to use in test.
-    final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
-    // Amend ServerManager mock so that when we do send close of our region on
-    // RANDOM_SERVERNAME, it will return true rather than default null.
-    Mockito.when(this.serverManager.sendRegionClose(RANDOM_SERVERNAME, hri)).
-      thenReturn(true);
-    // Create an AM.
-    AssignmentManager am =
-      new AssignmentManager(this.server, this.serverManager, this.ct, this.executor);
-    // Call the balance function
-    RegionPlan plan = new RegionPlan(hri, SRC_SERVERNAME, RANDOM_SERVERNAME);
-    am.balance(plan);
-    // This delete will fail if the previous unassign did wrong thing.
-    ZKAssign.deleteClosingNode(this.watcher, hri);
-  }
-
   @Test
   public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
     // Region to use in test.
@@ -203,4 +182,4 @@ public class TestAssignmentManager {
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
-}
\ No newline at end of file
+}
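For context, the fix being reverted here (and later recommitted) is visible in the MonitoredRPCHandlerImpl hunk above: markComplete() nulls the params and packet fields so a completed call's request and response no longer stay reachable from the long-lived per-handler status object. A minimal sketch of that pattern follows; the field types and class name are assumptions for illustration, since the diff shows only the assignments, not the declarations.

// Sketch only, not the HBase source: a long-lived per-handler monitor that
// drops references to the finished call's payload so it can be GC'd.
public class MonitoredHandlerSketch {
  private volatile String status;
  private volatile Object params; // request of the call being serviced (hypothetical type)
  private volatile Object packet; // serialized response for that call (hypothetical type)

  public void markComplete(String status) {
    this.status = status;
    // Release per-call state: without these two assignments, an idle handler
    // keeps the previous call's request and response strongly reachable
    // until the next call overwrites them.
    this.params = null;
    this.packet = null;
  }
}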