diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java index 8d32fca76b8..67e24882112 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java @@ -54,7 +54,7 @@ public class RegionTransition { } public ServerName getServerName() { - return ProtobufUtil.toServerName(this.rt.getOriginServerName()); + return ProtobufUtil.toServerName(this.rt.getServerName()); } public long getCreateTime() { @@ -105,7 +105,7 @@ public class RegionTransition { setHostName(sn.getHostname()).setPort(sn.getPort()).setStartCode(sn.getStartcode()).build(); ZooKeeperProtos.RegionTransition.Builder builder = ZooKeeperProtos.RegionTransition.newBuilder(). setEventTypeCode(type.getCode()).setRegionName(ByteString.copyFrom(regionName)). - setOriginServerName(pbsn); + setServerName(pbsn); builder.setCreateTime(System.currentTimeMillis()); if (payload != null) builder.setPayload(ByteString.copyFrom(payload)); return new RegionTransition(builder.build()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java index 8a58846a54d..adeeed41785 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java @@ -42,7 +42,7 @@ public class AssignCallable implements Callable { @Override public Object call() throws Exception { - assignmentManager.assign(hri, true, true, true); + assignmentManager.assign(hri, true, true); return null; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 47749fb9ae5..f53faebe157 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -23,12 +23,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicBoolean; @@ -117,9 +119,10 @@ public class AssignmentManager extends ZooKeeperListener { private final Map regionsToReopen; /* - * Maximum times we recurse an assignment. See below in {@link #assign()}. + * Maximum times we recurse an assignment/unassignment. + * See below in {@link #assign()} and {@link #unassign()}. */ - private final int maximumAssignmentAttempts; + private final int maximumAttempts; /** Plans for region movement. Key is the encoded version of a region name*/ // TODO: When do plans get cleaned out? Ever? In server open and in server @@ -158,6 +161,18 @@ public class AssignmentManager extends ZooKeeperListener { */ final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false); + // A temp ZK watcher for bulk assigner to avoid deadlock, + // will be removed in HBASE-6977 + // + // A separate ZK watcher used for async ZK node offline. + // We can't use that exiting one because it could lead to + // deadlocks if its event thread asks for a locker held by a bulk + // assigner thread. This watcher is just for async ZK node offline. + // In HBASE-6977, we are going to process assignment ZK events + // outside of ZK event thread, so there won't be deadlock + // threat anymore. That's when this watcher to be removed. 
+ private final ZooKeeperWatcher asyncOfflineZKWatcher; + /** * Constructs a new assignment manager. * @@ -180,20 +195,24 @@ public class AssignmentManager extends ZooKeeperListener { (new HashMap ()); Configuration conf = server.getConfiguration(); this.timeoutMonitor = new TimeoutMonitor( - conf.getInt("hbase.master.assignment.timeoutmonitor.period", 10000), + conf.getInt("hbase.master.assignment.timeoutmonitor.period", 60000), server, serverManager, - conf.getInt("hbase.master.assignment.timeoutmonitor.timeout", 1800000)); + conf.getInt("hbase.master.assignment.timeoutmonitor.timeout", 1200000)); this.timerUpdater = new TimerUpdater(conf.getInt( - "hbase.master.assignment.timerupdater.period", 10000), server); + "hbase.master.assignment.timerupdater.period", 10000), server); Threads.setDaemonThreadRunning(timerUpdater.getThread(), - server.getServerName() + ".timerUpdater"); + server.getServerName() + ".timerUpdater"); this.zkTable = new ZKTable(this.watcher); - this.maximumAssignmentAttempts = + this.maximumAttempts = this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10); this.balancer = balancer; this.threadPoolExecutorService = Executors.newCachedThreadPool(); this.masterMetrics = metrics;// can be null only with tests. 
this.regionStates = new RegionStates(server, serverManager); + // A temp ZK watcher for bulk assigner to avoid deadlock, + // will be removed in HBASE-6977 + asyncOfflineZKWatcher = new ZooKeeperWatcher(conf, + "async offline ZK watcher", server); } void startTimeOutMonitor() { @@ -265,7 +284,7 @@ public class AssignmentManager extends ZooKeeperListener { * @throws IOException */ public Pair getReopenStatus(byte[] tableName) - throws IOException { + throws IOException { List hris = MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName); Integer pending = 0; @@ -397,9 +416,8 @@ public class AssignmentManager extends ZooKeeperListener { * @throws IOException */ boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri) - throws InterruptedException, KeeperException, IOException { - boolean intransistion = - processRegionInTransition(hri.getEncodedName(), hri, null); + throws InterruptedException, KeeperException, IOException { + boolean intransistion = processRegionInTransition(hri.getEncodedName(), hri); if (!intransistion) return intransistion; LOG.debug("Waiting on " + HRegionInfo.prettyPrint(hri.getEncodedName())); while (!this.server.isStopped() && @@ -416,14 +434,12 @@ public class AssignmentManager extends ZooKeeperListener { * up in zookeeper. * @param encodedRegionName Region to process failover for. * @param regionInfo If null we'll go get it from meta table. - * @param deadServers Can be null * @return True if we processed regionInfo as a RIT. * @throws KeeperException * @throws IOException */ boolean processRegionInTransition(final String encodedRegionName, - final HRegionInfo regionInfo, final Map> deadServers) - throws KeeperException, IOException { + final HRegionInfo regionInfo) throws KeeperException, IOException { // We need a lock here to ensure that we will not put the same region twice // It has no reason to be a lock shared with the other operations. 
// We can do the lock on the region only, instead of a global lock: what we want to ensure @@ -445,7 +461,7 @@ public class AssignmentManager extends ZooKeeperListener { hri = regionStates.getRegionInfo(rt.getRegionName()); if (hri == null) return false; } - processRegionsInTransition(rt, hri, deadServers, stat.getVersion()); + processRegionsInTransition(rt, hri, stat.getVersion()); return true; } finally { lock.unlock(); @@ -453,16 +469,17 @@ public class AssignmentManager extends ZooKeeperListener { } /** - * This call is invoked only during failover mode, zk assignment node processing. + * This call is invoked only (1) master assign root and meta; + * (2) during failover mode startup, zk assignment node processing. * The locker is set in the caller. * * It should be private but it is used by some test too. */ - void processRegionsInTransition(final RegionTransition rt, final HRegionInfo regionInfo, - final Map> deadServers, int expectedVersion) - throws KeeperException { + void processRegionsInTransition( + final RegionTransition rt, final HRegionInfo regionInfo, + int expectedVersion) throws KeeperException { EventType et = rt.getEventType(); - // Get ServerName. Could be null. + // Get ServerName. Could not be null. ServerName sn = rt.getServerName(); String encodedRegionName = regionInfo.getEncodedName(); LOG.info("Processing region " + regionInfo.getRegionNameAsString() + " in state " + et); @@ -475,9 +492,8 @@ public class AssignmentManager extends ZooKeeperListener { case M_ZK_REGION_CLOSING: // If zk node of the region was updated by a live server skip this // region and just add it into RIT. - if (isOnDeadServer(regionInfo, deadServers) - && !serverManager.isServerOnline(sn)) { - // If was on dead server, its closed now. Force to OFFLINE and this + if (!serverManager.isServerOnline(sn)) { + // If was not online, its closed now. 
Force to OFFLINE and this // will get it reassigned if appropriate forceOffline(regionInfo, rt); } else { @@ -496,58 +512,42 @@ public class AssignmentManager extends ZooKeeperListener { case M_ZK_REGION_OFFLINE: // If zk node of the region was updated by a live server skip this // region and just add it into RIT. - if (isOnDeadServer(regionInfo, deadServers) - && (sn == null || !serverManager.isServerOnline(sn))) { + if (!serverManager.isServerOnline(sn)) { // Region is offline, insert into RIT and handle it like a closed addToRITandCallClose(regionInfo, RegionState.State.OFFLINE, rt); - } else if (sn != null && !serverManager.isServerOnline(sn)) { - // to handle cases where offline node is created but sendRegionOpen - // RPC is not yet sent - addToRITandCallClose(regionInfo, RegionState.State.OFFLINE, rt); } else { + // Just insert region into RIT. + // If this never updates the timeout will trigger new assignment regionStates.updateRegionState(rt, RegionState.State.PENDING_OPEN); } break; case RS_ZK_REGION_OPENING: - // TODO: Could check if it was on deadServers. If it was, then we could - // do what happens in TimeoutMonitor when it sees this condition. - // Just insert region into RIT - // If this never updates the timeout will trigger new assignment - if (regionInfo.isMetaTable()) { + if (regionInfo.isMetaTable() || !serverManager.isServerOnline(sn)) { regionStates.updateRegionState(rt, RegionState.State.OPENING); // If ROOT or .META. table is waiting for timeout monitor to assign // it may take lot of time when the assignment.timeout.period is // the default value which may be very long. We will not be able // to serve any request during this time. // So we will assign the ROOT and .META. region immediately. + // For a user region, if the server is not online, it takes + // some time for timeout monitor to kick in. We know the region + // won't open. So we will assign the opening + // region immediately too. 
processOpeningState(regionInfo); - break; - } else if (deadServers != null - && deadServers.keySet().contains(sn)) { - // if the region is found on a dead server, we can assign - // it to a new RS. (HBASE-5882) - processOpeningState(regionInfo); - break; + } else { + // Just insert region into RIT. + // If this never updates the timeout will trigger new assignment + regionStates.updateRegionState(rt, RegionState.State.OPENING); } - regionStates.updateRegionState(rt, RegionState.State.OPENING); break; case RS_ZK_REGION_OPENED: - // Region is opened, insert into RIT and handle it - regionStates.updateRegionState(rt, RegionState.State.OPEN); - // sn could be null if this server is no longer online. If - // that is the case, just let this RIT timeout; it'll be assigned - // to new server then. - if (sn == null) { - LOG.warn("Region in transition " + regionInfo.getEncodedName() + - " references a null server; letting RIT timeout so will be " + - "assigned elsewhere"); - } else if (!serverManager.isServerOnline(sn) - && (isOnDeadServer(regionInfo, deadServers) - || regionInfo.isMetaRegion() || regionInfo.isRootRegion())) { + if (!serverManager.isServerOnline(sn)) { forceOffline(regionInfo, rt); } else { + // Region is opened, insert into RIT and handle it + regionStates.updateRegionState(rt, RegionState.State.OPEN); new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process(); } break; @@ -562,7 +562,6 @@ public class AssignmentManager extends ZooKeeperListener { } } - /** * Put the region hri into an offline state up in zk. * @@ -573,13 +572,12 @@ public class AssignmentManager extends ZooKeeperListener { * @throws KeeperException */ private void forceOffline(final HRegionInfo hri, final RegionTransition oldRt) - throws KeeperException { + throws KeeperException { // If was on dead server, its closed now. 
Force to OFFLINE and then // handle it like a close; this will get it reassigned if appropriate LOG.debug("RIT " + hri.getEncodedName() + " in state=" + oldRt.getEventType() + " was on deadserver; forcing offline"); - ZKAssign.createOrForceNodeOffline(this.watcher, hri, - this.server.getServerName()); + ZKAssign.createOrForceNodeOffline(this.watcher, hri, oldRt.getServerName()); addToRITandCallClose(hri, RegionState.State.OFFLINE, oldRt); } @@ -591,7 +589,7 @@ public class AssignmentManager extends ZooKeeperListener { * @param oldData */ private void addToRITandCallClose(final HRegionInfo hri, - final RegionState.State state, final RegionTransition oldData) { + final RegionState.State state, final RegionTransition oldData) { regionStates.updateRegionState(oldData, state); new ClosedRegionHandler(this.server, this, hri).process(); } @@ -606,23 +604,6 @@ public class AssignmentManager extends ZooKeeperListener { } } - /** - * @param regionInfo - * @param deadServers Map of deadServers and the regions they were carrying; - * can be null. - * @return True if the passed regionInfo in the passed map of deadServers? - */ - private boolean isOnDeadServer(final HRegionInfo regionInfo, - final Map> deadServers) { - if (deadServers == null) return false; - for (List deadRegions: deadServers.values()) { - if (deadRegions.contains(regionInfo)) { - return true; - } - } - return false; - } - /** * Handles various states an unassigned node can be in. *

@@ -639,10 +620,6 @@ public class AssignmentManager extends ZooKeeperListener { return; } final ServerName sn = rt.getServerName(); - if (sn == null) { - LOG.warn("Null servername: " + rt); - return; - } // Check if this is a special HBCK transition if (sn.equals(HBCK_CODE_SERVERNAME)) { handleHBCK(rt); @@ -653,30 +630,48 @@ public class AssignmentManager extends ZooKeeperListener { String encodedName = HRegionInfo.encodeRegionName(regionName); String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName); // Verify this is a known server - if (!serverManager.isServerOnline(sn) && - !this.server.getServerName().equals(sn) + if (!serverManager.isServerOnline(sn) && !ignoreStatesRSOffline.contains(rt.getEventType())) { LOG.warn("Attempted to handle region transition for server but " + "server is not online: " + prettyPrintedRegionName); return; } - // We need a lock on the region as we could update it - Lock lock = locker.acquireLock(encodedName); - try { - // Printing if the event was created a long time ago helps debugging - boolean lateEvent = createTime < (System.currentTimeMillis() - 15000); - RegionState regionState = regionStates.getRegionTransitionState(encodedName); + RegionState regionState = + regionStates.getRegionTransitionState(encodedName); + long startTime = System.currentTimeMillis(); + if (LOG.isDebugEnabled()) { + boolean lateEvent = createTime < (startTime - 15000); LOG.debug("Handling transition=" + rt.getEventType() + ", server=" + sn + ", region=" + (prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) + (lateEvent ? ", which is more than 15 seconds late" : "") + ", current state from region state map =" + regionState); - switch (rt.getEventType()) { - case M_ZK_REGION_OFFLINE: - // Nothing to do. 
- break; + } + // We don't do anything for this event, + // so separate it out, no need to lock/unlock anything + if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) { + return; + } + // We need a lock on the region as we could update it + Lock lock = locker.acquireLock(encodedName); + try { + RegionState latestState = + regionStates.getRegionTransitionState(encodedName); + if ((regionState == null && latestState != null) + || (regionState != null && latestState == null) + || (regionState != null && latestState != null + && latestState.getState() != regionState.getState())) { + LOG.warn("Region state changed from " + regionState + " to " + + latestState + ", while acquiring lock"); + } + long waitedTime = System.currentTimeMillis() - startTime; + if (waitedTime > 5000) { + LOG.warn("Took " + waitedTime + "ms to acquire the lock"); + } + regionState = latestState; + switch (rt.getEventType()) { case RS_ZK_REGION_SPLITTING: if (!isInStateForSplitting(regionState)) break; regionStates.updateRegionState(rt, RegionState.State.SPLITTING); @@ -725,12 +720,12 @@ public class AssignmentManager extends ZooKeeperListener { case M_ZK_REGION_CLOSING: // Should see CLOSING after we have asked it to CLOSE or additional // times after already being in state of CLOSING - if (regionState != null && - (!regionState.isPendingClose() && !regionState.isClosing())) { - LOG.warn("Received CLOSING for region " + prettyPrintedRegionName + - " from server " + sn + " but region was in " + - " the state " + regionState + " and not " + - "in expected PENDING_CLOSE or CLOSING states"); + if (regionState != null + && !regionState.isPendingCloseOrClosingOnServer(sn)) { + LOG.warn("Received CLOSING for region " + prettyPrintedRegionName + + " from server " + sn + " but region was in the state " + regionState + + " and not in expected PENDING_CLOSE or CLOSING states," + + " or not on the expected server"); return; } // Transition to CLOSING (or update stamp if already CLOSING) @@ -739,12 +734,12 
@@ public class AssignmentManager extends ZooKeeperListener { case RS_ZK_REGION_CLOSED: // Should see CLOSED after CLOSING but possible after PENDING_CLOSE - if (regionState != null && - (!regionState.isPendingClose() && !regionState.isClosing())) { - LOG.warn("Received CLOSED for region " + prettyPrintedRegionName + - " from server " + sn + " but region was in " + - " the state " + regionState + " and not " + - "in expected PENDING_CLOSE or CLOSING states"); + if (regionState != null + && !regionState.isPendingCloseOrClosingOnServer(sn)) { + LOG.warn("Received CLOSED for region " + prettyPrintedRegionName + + " from server " + sn + " but region was in the state " + regionState + + " and not in expected PENDING_CLOSE or CLOSING states," + + " or not on the expected server"); return; } // Handle CLOSED by assigning elsewhere or stopping if a disable @@ -759,11 +754,12 @@ public class AssignmentManager extends ZooKeeperListener { break; case RS_ZK_REGION_FAILED_OPEN: - if (regionState != null && - (!regionState.isPendingOpen() && !regionState.isOpening())) { - LOG.warn("Received FAILED_OPEN for region " + prettyPrintedRegionName + - " from server " + sn + " but region was in " + - " the state " + regionState + " and not in PENDING_OPEN or OPENING"); + if (regionState != null + && !regionState.isPendingOpenOrOpeningOnServer(sn)) { + LOG.warn("Received FAILED_OPEN for region " + prettyPrintedRegionName + + " from server " + sn + " but region was in the state " + regionState + + " and not in expected PENDING_OPEN or OPENING states," + + " or not on the expected server"); return; } // Handle this the same as if it were opened and then closed. @@ -771,7 +767,7 @@ public class AssignmentManager extends ZooKeeperListener { // When there are more than one region server a new RS is selected as the // destination and the same is updated in the regionplan. 
(HBASE-5546) if (regionState != null) { - getRegionPlan(regionState, sn, true); + getRegionPlan(regionState.getRegion(), sn, true); this.executorService.submit(new ClosedRegionHandler(server, this, regionState.getRegion())); } @@ -780,13 +776,12 @@ public class AssignmentManager extends ZooKeeperListener { case RS_ZK_REGION_OPENING: // Should see OPENING after we have asked it to OPEN or additional // times after already being in state of OPENING - if (regionState != null && - (!regionState.isPendingOpen() && !regionState.isOpening())) { - LOG.warn("Received OPENING for region " + - prettyPrintedRegionName + - " from server " + sn + " but region was in " + - " the state " + regionState + " and not " + - "in expected PENDING_OPEN or OPENING states"); + if (regionState != null + && !regionState.isPendingOpenOrOpeningOnServer(sn)) { + LOG.warn("Received OPENING for region " + prettyPrintedRegionName + + " from server " + sn + " but region was in the state " + regionState + + " and not in expected PENDING_OPEN or OPENING states," + + " or not on the expected server"); return; } // Transition to OPENING (or update stamp if already OPENING) @@ -795,13 +790,12 @@ public class AssignmentManager extends ZooKeeperListener { case RS_ZK_REGION_OPENED: // Should see OPENED after OPENING but possible after PENDING_OPEN - if (regionState != null && - (!regionState.isPendingOpen() && !regionState.isOpening())) { - LOG.warn("Received OPENED for region " + - prettyPrintedRegionName + - " from server " + sn + " but region was in " + - " the state " + regionState + " and not " + - "in expected PENDING_OPEN or OPENING states"); + if (regionState != null + && !regionState.isPendingOpenOrOpeningOnServer(sn)) { + LOG.warn("Received OPENED for region " + prettyPrintedRegionName + + " from server " + sn + " but region was in the state " + regionState + + " and not in expected PENDING_OPEN or OPENING states," + + " or not on the expected server"); return; } // Handle OPENED by removing from 
transition and deleted zk node @@ -948,31 +942,36 @@ public class AssignmentManager extends ZooKeeperListener { public void nodeDeleted(final String path) { if (path.startsWith(this.watcher.assignmentZNode)) { String regionName = ZKAssign.getRegionName(this.watcher, path); - RegionState rs = regionStates.getRegionTransitionState(regionName); - if (rs != null) { - HRegionInfo regionInfo = rs.getRegion(); - if (rs.isSplit()) { - LOG.debug("Ephemeral node deleted, regionserver crashed?, " + - "clearing from RIT; rs=" + rs); - regionOffline(rs.getRegion()); - } else { - LOG.debug("The znode of region " + regionInfo.getRegionNameAsString() - + " has been deleted."); - if (rs.isOpened()) { - ServerName serverName = rs.getServerName(); - regionOnline(regionInfo, serverName); - LOG.info("The master has opened the region " - + regionInfo.getRegionNameAsString() + " that was online on " - + serverName); - if (this.getZKTable().isDisablingOrDisabledTable( - regionInfo.getTableNameAsString())) { - LOG.debug("Opened region " - + regionInfo.getRegionNameAsString() + " but " - + "this table is disabled, triggering close of region"); - unassign(regionInfo); + Lock lock = locker.acquireLock(regionName); + try { + RegionState rs = regionStates.getRegionTransitionState(regionName); + if (rs != null) { + HRegionInfo regionInfo = rs.getRegion(); + if (rs.isSplit()) { + LOG.debug("Ephemeral node deleted, regionserver crashed?, " + + "clearing from RIT; rs=" + rs); + regionOffline(rs.getRegion()); + } else { + LOG.debug("The znode of region " + regionInfo.getRegionNameAsString() + + " has been deleted."); + if (rs.isOpened()) { + ServerName serverName = rs.getServerName(); + regionOnline(regionInfo, serverName); + LOG.info("The master has opened the region " + + regionInfo.getRegionNameAsString() + " that was online on " + + serverName); + if (this.getZKTable().isDisablingOrDisabledTable( + regionInfo.getTableNameAsString())) { + LOG.debug("Opened region " + + 
regionInfo.getRegionNameAsString() + " but " + + "this table is disabled, triggering close of region"); + unassign(regionInfo); + } } } } + } finally { + lock.unlock(); } } } @@ -1125,22 +1124,12 @@ public class AssignmentManager extends ZooKeeperListener { assign(region, setOfflineInZK, false); } - public void assign(HRegionInfo region, boolean setOfflineInZK, - boolean forceNewPlan) { - assign(region, setOfflineInZK, forceNewPlan, false); - } - /** - * @param region - * @param setOfflineInZK - * @param forceNewPlan - * @param hijack True if new assignment is needed, false otherwise + * Use care with forceNewPlan. It could cause double assignment. */ - public void assign(HRegionInfo region, boolean setOfflineInZK, - boolean forceNewPlan, boolean hijack) { - // If hijack is true do not call disableRegionIfInRIT as - // we have not yet moved the znode to OFFLINE state. - if (!hijack && isDisabledorDisablingRegionInRIT(region)) { + public void assign(HRegionInfo region, + boolean setOfflineInZK, boolean forceNewPlan) { + if (!setOfflineInZK && isDisabledorDisablingRegionInRIT(region)) { return; } if (this.serverManager.isClusterShutdown()) { @@ -1148,11 +1137,13 @@ public class AssignmentManager extends ZooKeeperListener { region.getRegionNameAsString()); return; } - RegionState state = forceRegionStateToOffline(region, hijack); String encodedName = region.getEncodedName(); Lock lock = locker.acquireLock(encodedName); try { - assign(region, state, setOfflineInZK, forceNewPlan, hijack); + RegionState state = forceRegionStateToOffline(region, forceNewPlan); + if (state != null) { + assign(state, setOfflineInZK, forceNewPlan); + } } finally { lock.unlock(); } @@ -1166,226 +1157,251 @@ public class AssignmentManager extends ZooKeeperListener { */ boolean assign(final ServerName destination, final List regions) { - if (regions.size() == 0) { + int regionCount = regions.size(); + if (regionCount == 0) { return true; } - LOG.debug("Bulk assigning " + regions.size() + " 
region(s) to " + + LOG.debug("Bulk assigning " + regionCount + " region(s) to " + destination.toString()); - List states = new ArrayList(regions.size()); + Set encodedNames = new HashSet(regionCount); for (HRegionInfo region : regions) { - states.add(forceRegionStateToOffline(region)); - } - // Add region plans, so we can updateTimers when one region is opened so - // that unnecessary timeout on RIT is reduced. - Map plans = new HashMap(regions.size()); - for (HRegionInfo region : regions) { - plans.put(region.getEncodedName(), new RegionPlan(region, null, - destination)); - } - this.addPlans(plans); - - // Presumption is that only this thread will be updating the state at this - // time; i.e. handlers on backend won't be trying to set it to OPEN, etc. - AtomicInteger counter = new AtomicInteger(0); - CreateUnassignedAsyncCallback cb = - new CreateUnassignedAsyncCallback(regionStates, this.watcher, destination, counter); - for (RegionState state: states) { - if (!asyncSetOfflineInZooKeeper(state, cb, state)) { - return false; - } - } - // Wait until all unassigned nodes have been put up and watchers set. - int total = regions.size(); - for (int oldCounter = 0; !server.isStopped();) { - int count = counter.get(); - if (oldCounter != count) { - LOG.info(destination.toString() + " unassigned znodes=" + count + - " of total=" + total); - oldCounter = count; - } - if (count == total) break; - Threads.sleep(10); - } - if (server.isStopped()) { - return false; + encodedNames.add(region.getEncodedName()); } - // Move on to open regions. + List failedToOpenRegions = new ArrayList(); + Map locks = locker.acquireLocks(encodedNames); try { - // Send OPEN RPC. If it fails on a IOE or RemoteException, the - // TimeoutMonitor will pick up the pieces. - long maxWaitTime = System.currentTimeMillis() + - this.server.getConfiguration(). 
- getLong("hbase.regionserver.rpc.startup.waittime", 60000); - while (!this.server.isStopped()) { - try { - List regionOpeningStateList = this.serverManager - .sendRegionOpen(destination, regions); - if (regionOpeningStateList == null) { - // Failed getting RPC connection to this server - return false; - } - for (int i = 0; i < regionOpeningStateList.size(); i++) { - if (regionOpeningStateList.get(i) == RegionOpeningState.ALREADY_OPENED) { - processAlreadyOpenedRegion(regions.get(i), destination); - } else if (regionOpeningStateList.get(i) == RegionOpeningState.FAILED_OPENING) { - // Failed opening this region, reassign it - assign(regions.get(i), true, true); - } - } - break; - } catch (RemoteException e) { - IOException decodedException = e.unwrapRemoteException(); - if (decodedException instanceof RegionServerStoppedException) { - LOG.warn("The region server was shut down, ", decodedException); - // No need to retry, the region server is a goner. - return false; - } else if (decodedException instanceof ServerNotRunningYetException) { - // This is the one exception to retry. For all else we should just fail - // the startup. 
- long now = System.currentTimeMillis(); - if (now > maxWaitTime) throw e; - LOG.debug("Server is not yet up; waiting up to " + - (maxWaitTime - now) + "ms", e); - Thread.sleep(100); - } - - throw decodedException; + AtomicInteger counter = new AtomicInteger(0); + Map offlineNodesVersions = new ConcurrentHashMap(); + OfflineCallback cb = new OfflineCallback( + regionStates, asyncOfflineZKWatcher, destination, counter, offlineNodesVersions); + Map plans = new HashMap(regions.size()); + List states = new ArrayList(regions.size()); + for (HRegionInfo region : regions) { + String encodedRegionName = region.getEncodedName(); + RegionState state = forceRegionStateToOffline(region, true); + if (state != null && asyncSetOfflineInZooKeeper( + state, asyncOfflineZKWatcher, cb, destination)) { + RegionPlan plan = new RegionPlan(region, state.getServerName(), destination); + plans.put(encodedRegionName, plan); + states.add(state); + } else { + LOG.warn("failed to force region state to offline or " + + "failed to set it offline in ZK, will reassign later: " + region); + failedToOpenRegions.add(region); // assign individually later + Lock lock = locks.remove(encodedRegionName); + lock.unlock(); } } - } catch (IOException e) { - // Can be a socket timeout, EOF, NoRouteToHost, etc - LOG.info("Unable to communicate with the region server in order" + + + // Wait until all unassigned nodes have been put up and watchers set. + int total = states.size(); + for (int oldCounter = 0; !server.isStopped();) { + int count = counter.get(); + if (oldCounter != count) { + LOG.info(destination.toString() + " unassigned znodes=" + count + + " of total=" + total); + oldCounter = count; + } + if (count >= total) break; + Threads.sleep(5); + } + + if (server.isStopped()) { + return false; + } + + // Add region plans, so we can updateTimers when one region is opened so + // that unnecessary timeout on RIT is reduced. 
+ this.addPlans(plans); + + List> regionOpenInfos = + new ArrayList>(states.size()); + for (RegionState state: states) { + HRegionInfo region = state.getRegion(); + String encodedRegionName = region.getEncodedName(); + Integer nodeVersion = offlineNodesVersions.get(encodedRegionName); + if (nodeVersion == null || nodeVersion.intValue() == -1) { + LOG.warn("failed to offline in zookeeper: " + region); + failedToOpenRegions.add(region); // assign individually later + Lock lock = locks.remove(encodedRegionName); + lock.unlock(); + } else { + try { // Set the ZK watcher explicitly + ZKAssign.getData(this.watcher, encodedRegionName); + } catch (KeeperException e) { + server.abort("Unexpected exception watching ZKAssign node", e); + return false; + } + regionStates.updateRegionState(region, + RegionState.State.PENDING_OPEN, destination); + regionOpenInfos.add(new Pair( + region, nodeVersion)); + } + } + + // Move on to open regions. + try { + // Send OPEN RPC. If it fails on a IOE or RemoteException, the + // TimeoutMonitor will pick up the pieces. + long maxWaitTime = System.currentTimeMillis() + + this.server.getConfiguration(). 
+ getLong("hbase.regionserver.rpc.startup.waittime", 60000); + while (!this.server.isStopped()) { + try { + List regionOpeningStateList = serverManager + .sendRegionOpen(destination, regionOpenInfos); + if (regionOpeningStateList == null) { + // Failed getting RPC connection to this server + return false; + } + for (int i = 0, n = regionOpeningStateList.size(); i < n; i++) { + RegionOpeningState openingState = regionOpeningStateList.get(i); + if (openingState != RegionOpeningState.OPENED) { + HRegionInfo region = regionOpenInfos.get(i).getFirst(); + if (openingState == RegionOpeningState.ALREADY_OPENED) { + processAlreadyOpenedRegion(region, destination); + } else if (openingState == RegionOpeningState.FAILED_OPENING) { + // Failed opening this region, reassign it later + failedToOpenRegions.add(region); + } else { + LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state " + + openingState + " in assigning region " + region); + } + } + } + break; + } catch (IOException e) { + if (e instanceof RemoteException) { + e = ((RemoteException)e).unwrapRemoteException(); + } + if (e instanceof RegionServerStoppedException) { + LOG.warn("The region server was shut down, ", e); + // No need to retry, the region server is a goner. + return false; + } else if (e instanceof ServerNotRunningYetException) { + // This is the one exception to retry. For all else we should just fail + // the startup. 
+ long now = System.currentTimeMillis(); + if (now < maxWaitTime) { + LOG.debug("Server is not yet up; waiting up to " + + (maxWaitTime - now) + "ms", e); + Thread.sleep(100); + continue; + } + } + throw e; + } + } + } catch (IOException e) { + // Can be a socket timeout, EOF, NoRouteToHost, etc + LOG.info("Unable to communicate with the region server in order" + " to assign regions", e); - return false; - } catch (InterruptedException e) { - throw new RuntimeException(e); + return false; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } finally { + for (Lock lock : locks.values()) { + lock.unlock(); + } + } + + if (!failedToOpenRegions.isEmpty()) { + for (HRegionInfo region : failedToOpenRegions) { + invokeAssign(region); + } } LOG.debug("Bulk assigning done for " + destination.toString()); return true; } /** - * Callback handler for create unassigned znodes used during bulk assign. + * Send CLOSE RPC if the server is online, otherwise, offline the region */ - static class CreateUnassignedAsyncCallback implements AsyncCallback.StringCallback { - private final Log LOG = LogFactory.getLog(CreateUnassignedAsyncCallback.class); - private final RegionStates regionStates; - private final ZooKeeperWatcher zkw; - private final ServerName destination; - private final AtomicInteger counter; - - CreateUnassignedAsyncCallback(final RegionStates regionStates, - final ZooKeeperWatcher zkw, final ServerName destination, - final AtomicInteger counter) { - this.regionStates = regionStates; - this.zkw = zkw; - this.destination = destination; - this.counter = counter; + private void unassign(final HRegionInfo region, + final RegionState state, final int versionOfClosingNode, + final ServerName dest, final boolean transitionInZK) { + // Send CLOSE RPC + ServerName server = state.getServerName(); + // ClosedRegionhandler can remove the server from this.regions + if (!serverManager.isServerOnline(server)) { + // delete the node. 
if no node exists, we need not bother. + deleteClosingOrClosedNode(region); + regionOffline(region); + return; } - @Override - public void processResult(int rc, String path, Object ctx, String name) { - if (rc == KeeperException.Code.NODEEXISTS.intValue()) { - LOG.warn("Node for " + path + " already exists"); - } else if (rc != 0) { - // This is result code. If non-zero, need to resubmit. - LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " + - "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2"); - this.zkw.abort("Connectionloss writing unassigned at " + path + - ", rc=" + rc, null); - return; + for (int i = 1; i <= this.maximumAttempts; i++) { + try { + if (serverManager.sendRegionClose(server, region, + versionOfClosingNode, dest, transitionInZK)) { + LOG.debug("Sent CLOSE to " + server + " for region " + + region.getRegionNameAsString()); + return; + } + // This never happens. Currently regionserver close always returns true. + LOG.warn("Server " + server + " region CLOSE RPC returned false for " + + region.getRegionNameAsString()); + } catch (Throwable t) { + if (t instanceof RemoteException) { + t = ((RemoteException)t).unwrapRemoteException(); + } + if (t instanceof NotServingRegionException) { + deleteClosingOrClosedNode(region); + regionOffline(region); + return; + } else if (t instanceof RegionAlreadyInTransitionException) { + // RS is already processing this region, only need to update the timestamp + LOG.debug("update " + state + " the timestamp."); + state.updateTimestampToNow(); + } + LOG.info("Server " + server + " returned " + t + " for " + + region.getRegionNameAsString() + ", try=" + i + + " of " + this.maximumAttempts, t); + // Presume retry or server will expire. } - LOG.debug("rs=" + (RegionState)ctx + ", server=" + this.destination.toString()); - // Async exists to set a watcher so we'll get triggered when - // unassigned node changes. 
- this.zkw.getRecoverableZooKeeper().getZooKeeper().exists(path, this.zkw, - new ExistsUnassignedAsyncCallback(regionStates, counter, destination), ctx); } } /** - * Callback handler for the exists call that sets watcher on unassigned znodes. - * Used during bulk assign on startup. + * Set region to OFFLINE unless it is opening and forceNewPlan is false. */ - static class ExistsUnassignedAsyncCallback implements AsyncCallback.StatCallback { - private final Log LOG = LogFactory.getLog(ExistsUnassignedAsyncCallback.class); - private final RegionStates regionStates; - private final AtomicInteger counter; - private ServerName destination; - - ExistsUnassignedAsyncCallback(final RegionStates regionStates, - final AtomicInteger counter, ServerName destination) { - this.regionStates = regionStates; - this.counter = counter; - this.destination = destination; - } - - @Override - public void processResult(int rc, String path, Object ctx, Stat stat) { - if (rc != 0) { - // This is result code. If non-zero, need to resubmit. - LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " + - "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2"); - return; - } - RegionState state = (RegionState)ctx; - LOG.debug("rs=" + state); - // Transition RegionState to PENDING_OPEN here in master; means we've - // sent the open. We're a little ahead of ourselves here since we've not - // yet sent out the actual open but putting this state change after the - // call to open risks our writing PENDING_OPEN after state has been moved - // to OPENING by the regionserver. - regionStates.updateRegionState(state.getRegion(), - RegionState.State.PENDING_OPEN, System.currentTimeMillis(), - destination); - this.counter.addAndGet(1); - } - } - - /** - * Sets regions {@link RegionState} to {@link RegionState.State#OFFLINE}. - * @param region - * @return Amended RegionState. 
- */ - private RegionState forceRegionStateToOffline(final HRegionInfo region) { - return forceRegionStateToOffline(region, false); - } - - /** - * Sets regions {@link RegionState} to {@link RegionState.State#OFFLINE}. - * @param region - * @param hijack - * @return Amended RegionState. - */ - private RegionState forceRegionStateToOffline(final HRegionInfo region, - boolean hijack) { - String encodedName = region.getEncodedName(); - - Lock lock = locker.acquireLock(encodedName); - try { - RegionState state = regionStates.getRegionTransitionState(encodedName); - if (state == null) { - state = regionStates.updateRegionState( - region, RegionState.State.OFFLINE); - } else { - // If we are reassigning the node do not force in-memory state to OFFLINE. - // Based on the znode state we will decide if to change in-memory state to - // OFFLINE or not. It will be done before setting znode to OFFLINE state. - - // We often get here with state == CLOSED because ClosedRegionHandler will - // assign on its tail as part of the handling of a region close. 
- if (!hijack) { + private RegionState forceRegionStateToOffline( + final HRegionInfo region, final boolean forceNewPlan) { + RegionState state = regionStates.getRegionState(region); + if (state == null) { + LOG.warn("Assigning a region not in region states: " + region); + state = regionStates.createRegionState(region); + } else { + switch (state.getState()) { + case OPEN: + case OPENING: + case PENDING_OPEN: + if (!forceNewPlan) { + LOG.debug("Attempting to assign region " + + region + " but it is already in transition: " + state); + return null; + } + case CLOSING: + case PENDING_CLOSE: + unassign(region, state, -1, null, false); + case CLOSED: + if (!state.isOffline()) { LOG.debug("Forcing OFFLINE; was=" + state); state = regionStates.updateRegionState( region, RegionState.State.OFFLINE); } + case OFFLINE: + break; + default: + LOG.error("Trying to assign region " + region + + ", which is in state " + state); + return null; } - return state; - } finally { - lock.unlock(); } + return state; } /** @@ -1393,35 +1409,41 @@ public class AssignmentManager extends ZooKeeperListener { * @param state * @param setOfflineInZK * @param forceNewPlan - * @param hijack */ - private void assign(final HRegionInfo region, final RegionState state, - final boolean setOfflineInZK, final boolean forceNewPlan, - boolean hijack) { - boolean regionAlreadyInTransitionException = false; - boolean serverNotRunningYet = false; + private void assign(RegionState state, + final boolean setOfflineInZK, final boolean forceNewPlan) { RegionState currentState = state; + int versionOfOfflineNode = -1; + RegionPlan plan = null; long maxRegionServerStartupWaitTime = -1; - for (int i = 0; i < this.maximumAssignmentAttempts; i++) { - int versionOfOfflineNode = -1; - if (setOfflineInZK) { + HRegionInfo region = state.getRegion(); + for (int i = 1; i <= this.maximumAttempts; i++) { + if (plan == null) { // Get a server for the region at first + plan = getRegionPlan(region, forceNewPlan); + } + if (plan 
== null) { + LOG.debug("Unable to determine a plan to assign " + region); + this.timeoutMonitor.setAllRegionServersOffline(true); + return; // Should get reassigned later when RIT times out. + } + if (setOfflineInZK && versionOfOfflineNode == -1) { // get the version of the znode after setting it to OFFLINE. // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE - versionOfOfflineNode = setOfflineInZooKeeper(currentState, hijack); + versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination()); if (versionOfOfflineNode != -1) { if (isDisabledorDisablingRegionInRIT(region)) { return; } // In case of assignment from EnableTableHandler table state is ENABLING. Any how // EnableTableHandler will set ENABLED after assigning all the table regions. If we - // try to set to ENABLED directly then client api may think table is enabled. + // try to set to ENABLED directly then client API may think table is enabled. // When we have a case such as all the regions are added directly into .META. and we call // assignRegion then we need to make the table ENABLED. Hence in such case the table // will not be in ENABLING or ENABLED state. 
String tableName = region.getTableNameAsString(); if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) { LOG.debug("Setting table " + tableName + " to ENABLED state."); - setEnabledTable(region); + setEnabledTable(tableName); } } } @@ -1429,29 +1451,21 @@ public class AssignmentManager extends ZooKeeperListener { return; } if (this.server.isStopped()) { - LOG.debug("Server stopped; skipping assign of " + state); + LOG.debug("Server stopped; skipping assign of " + region); return; } - RegionPlan plan = getRegionPlan(state, - !regionAlreadyInTransitionException && !serverNotRunningYet && forceNewPlan); - if (plan == null) { - LOG.debug("Unable to determine a plan to assign " + state); - this.timeoutMonitor.setAllRegionServersOffline(true); - return; // Should get reassigned later when RIT times out. - } try { - LOG.info("Assigning region " + state.getRegion().getRegionNameAsString() + + LOG.info("Assigning region " + region.getRegionNameAsString() + " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN - currentState = regionStates.updateRegionState(state.getRegion(), - RegionState.State.PENDING_OPEN, System.currentTimeMillis(), - plan.getDestination()); + currentState = regionStates.updateRegionState(region, + RegionState.State.PENDING_OPEN, plan.getDestination()); // Send OPEN RPC. This can fail if the server on other end is is not up. // Pass the version that was obtained while setting the node to OFFLINE. 
RegionOpeningState regionOpenState = serverManager.sendRegionOpen(plan - .getDestination(), state.getRegion(), versionOfOfflineNode); + .getDestination(), region, versionOfOfflineNode); if (regionOpenState == RegionOpeningState.ALREADY_OPENED) { - processAlreadyOpenedRegion(state.getRegion(), plan.getDestination()); + processAlreadyOpenedRegion(region, plan.getDestination()); } else if (regionOpenState == RegionOpeningState.FAILED_OPENING) { // Failed opening this region throw new Exception("Get regionOpeningState=" + regionOpenState); @@ -1461,13 +1475,14 @@ public class AssignmentManager extends ZooKeeperListener { if (t instanceof RemoteException) { t = ((RemoteException) t).unwrapRemoteException(); } - regionAlreadyInTransitionException = false; - serverNotRunningYet = false; + boolean regionAlreadyInTransitionException = false; + boolean serverNotRunningYet = false; + boolean socketTimedOut = false; if (t instanceof RegionAlreadyInTransitionException) { regionAlreadyInTransitionException = true; if (LOG.isDebugEnabled()) { LOG.debug("Failed assignment in: " + plan.getDestination() + " due to " - + t.getMessage()); + + t.getMessage()); } } else if (t instanceof ServerNotRunningYetException) { if (maxRegionServerStartupWaitTime < 0) { @@ -1488,52 +1503,72 @@ public class AssignmentManager extends ZooKeeperListener { } } catch (InterruptedException ie) { LOG.warn("Failed to assign " - + state.getRegion().getRegionNameAsString() + " since interrupted", ie); + + region.getRegionNameAsString() + " since interrupted", ie); Thread.currentThread().interrupt(); return; } - } - if (t instanceof java.net.SocketTimeoutException + } else if (t instanceof java.net.SocketTimeoutException && this.serverManager.isServerOnline(plan.getDestination())) { - LOG.warn("Call openRegion() to " + plan.getDestination() + // In case socket is timed out and the region server is still online, + // the openRegion RPC could have been accepted by the server and + // just the response isn't 
gone through. So we will retry to + // open the region on the same server to avoid possible + // double assignment. + socketTimedOut = true; + if (LOG.isDebugEnabled()) { + LOG.debug("Call openRegion() to " + plan.getDestination() + " has timed out when trying to assign " + region.getRegionNameAsString() + ", but the region might already be opened on " + plan.getDestination() + ".", t); - return; + } } + LOG.warn("Failed assignment of " - + state.getRegion().getRegionNameAsString() + + region.getRegionNameAsString() + " to " + plan.getDestination() + ", trying to assign " - + (regionAlreadyInTransitionException || serverNotRunningYet - ? "to the same region server because of " - + "RegionAlreadyInTransitionException/ServerNotRunningYetException;" - : "elsewhere instead; ") - + "retry=" + i, t); - // Clean out plan we failed execute and one that doesn't look like it'll - // succeed anyways; we need a new plan! - // Transition back to OFFLINE - currentState = regionStates.updateRegionState( - state.getRegion(), RegionState.State.OFFLINE); + + (regionAlreadyInTransitionException || serverNotRunningYet || socketTimedOut + ? "to the same region server because of RegionAlreadyInTransitionException" + + "/ServerNotRunningYetException/SocketTimeoutException;" + : "elsewhere instead; ") + + "try=" + i + " of " + this.maximumAttempts, t); + + if (i == this.maximumAttempts) { + // Don't reset the region state or get a new plan any more. + // This is the last try. + continue; + } + // If region opened on destination of present plan, reassigning to new // RS may cause double assignments. In case of RegionAlreadyInTransitionException // reassigning to same RS. RegionPlan newPlan = plan; - if (!regionAlreadyInTransitionException && !serverNotRunningYet) { + if (!(regionAlreadyInTransitionException + || serverNotRunningYet || socketTimedOut)) { // Force a new plan and reassign. Will return null if no servers. 
// The new plan could be the same as the existing plan since we don't // exclude the server of the original plan, which should not be // excluded since it could be the only server up now. - newPlan = getRegionPlan(state, true); + newPlan = getRegionPlan(region, true); } if (newPlan == null) { this.timeoutMonitor.setAllRegionServersOffline(true); LOG.warn("Unable to find a viable location to assign region " + - state.getRegion().getRegionNameAsString()); + region.getRegionNameAsString()); return; } + if (plan != newPlan + && !plan.getDestination().equals(newPlan.getDestination())) { + // Clean out plan we failed execute and one that doesn't look like it'll + // succeed anyways; we need a new plan! + // Transition back to OFFLINE + currentState = regionStates.updateRegionState( + region, RegionState.State.OFFLINE); + versionOfOfflineNode = -1; + plan = newPlan; + } } } } @@ -1577,44 +1612,22 @@ public class AssignmentManager extends ZooKeeperListener { * Set region as OFFLINED up in zookeeper * * @param state - * @param hijack - * - true if needs to be hijacked and reassigned, false otherwise. * @return the version of the offline node if setting of the OFFLINE node was * successful, -1 otherwise. */ - int setOfflineInZooKeeper(final RegionState state, boolean hijack) { - // In case of reassignment the current state in memory need not be - // OFFLINE. - if (!hijack && !state.isClosed() && !state.isOffline()) { + private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) { + if (!state.isClosed() && !state.isOffline()) { String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE."; this.server.abort(msg, new IllegalStateException(msg)); return -1; } - boolean allowZNodeCreation = false; - // Under reassignment if the current state is PENDING_OPEN - // or OPENING then refresh the in-memory state to PENDING_OPEN. 
This is - // important because if the region was in - // RS_OPENING state for a long time the master will try to force the znode - // to OFFLINE state meanwhile the RS could have opened the corresponding - // region and the state in znode will be RS_ZK_REGION_OPENED. - // For all other cases we can change the in-memory state to OFFLINE. - if (hijack && - (state.getState().equals(RegionState.State.PENDING_OPEN) || - state.getState().equals(RegionState.State.OPENING))) { - regionStates.updateRegionState(state.getRegion(), - RegionState.State.PENDING_OPEN); - allowZNodeCreation = false; - } else { - regionStates.updateRegionState(state.getRegion(), - RegionState.State.OFFLINE); - allowZNodeCreation = true; - } + regionStates.updateRegionState(state.getRegion(), + RegionState.State.OFFLINE); int versionOfOfflineNode = -1; try { // get the version after setting the znode to OFFLINE versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher, - state.getRegion(), this.server.getServerName(), - hijack, allowZNodeCreation); + state.getRegion(), destination); if (versionOfOfflineNode == -1) { LOG.warn("Attempted to create/force node into OFFLINE state before " + "completing assignment but failed to do so for " + state); @@ -1628,57 +1641,28 @@ public class AssignmentManager extends ZooKeeperListener { } /** - * Set region as OFFLINED up in zookeeper asynchronously. - * @param state - * @return True if we succeeded, false otherwise (State was incorrect or failed - * updating zk). 
- */ - boolean asyncSetOfflineInZooKeeper(final RegionState state, - final AsyncCallback.StringCallback cb, final Object ctx) { - if (!state.isClosed() && !state.isOffline()) { - this.server.abort("Unexpected state trying to OFFLINE; " + state, - new IllegalStateException()); - return false; - } - regionStates.updateRegionState( - state.getRegion(), RegionState.State.OFFLINE); - try { - ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(), - this.server.getServerName(), cb, ctx); - } catch (KeeperException e) { - if (e instanceof NodeExistsException) { - LOG.warn("Node for " + state.getRegion() + " already exists"); - } else { - server.abort("Unexpected ZK exception creating/setting node OFFLINE", e); - } - return false; - } - return true; - } - - /** - * @param state - * @return Plan for passed state (If none currently, it creates one or + * @param region the region to assign + * @return Plan for passed region (If none currently, it creates one or * if no servers to assign, it returns null). */ - RegionPlan getRegionPlan(final RegionState state, + private RegionPlan getRegionPlan(final HRegionInfo region, final boolean forceNewPlan) { - return getRegionPlan(state, null, forceNewPlan); + return getRegionPlan(region, null, forceNewPlan); } /** - * @param state + * @param region the region to assign * @param serverToExclude Server to exclude (we know its bad). Pass null if * all servers are thought to be assignable. * @param forceNewPlan If true, then if an existing plan exists, a new plan * will be generated. - * @return Plan for passed state (If none currently, it creates one or + * @return Plan for passed region (If none currently, it creates one or * if no servers to assign, it returns null). 
*/ - RegionPlan getRegionPlan(final RegionState state, + private RegionPlan getRegionPlan(final HRegionInfo region, final ServerName serverToExclude, final boolean forceNewPlan) { // Pickup existing plan or make a new one - final String encodedName = state.getRegion().getEncodedName(); + final String encodedName = region.getEncodedName(); final List destServers = serverManager.createDestinationServersList(serverToExclude); @@ -1696,9 +1680,8 @@ public class AssignmentManager extends ZooKeeperListener { existingPlan = this.regionPlans.get(encodedName); if (existingPlan != null && existingPlan.getDestination() != null) { - LOG.debug("Found an existing plan for " + - state.getRegion().getRegionNameAsString() + - " destination server is " + existingPlan.getDestination().toString()); + LOG.debug("Found an existing plan for " + region.getRegionNameAsString() + + " destination server is " + existingPlan.getDestination()); } if (forceNewPlan @@ -1706,15 +1689,15 @@ public class AssignmentManager extends ZooKeeperListener { || existingPlan.getDestination() == null || !destServers.contains(existingPlan.getDestination())) { newPlan = true; - randomPlan = new RegionPlan(state.getRegion(), null, - balancer.randomAssignment(state.getRegion(), destServers)); + randomPlan = new RegionPlan(region, null, + balancer.randomAssignment(region, destServers)); this.regionPlans.put(encodedName, randomPlan); } } if (newPlan) { LOG.debug("No previous transition plan was found (or we are ignoring " + - "an existing plan) for " + state.getRegion().getRegionNameAsString() + + "an existing plan) for " + region.getRegionNameAsString() + " so generated a random one; " + randomPlan + "; " + serverManager.countOfRegionServers() + " (online=" + serverManager.getOnlineServers().size() + @@ -1722,8 +1705,8 @@ public class AssignmentManager extends ZooKeeperListener { return randomPlan; } LOG.debug("Using pre-existing plan for region " + - state.getRegion().getRegionNameAsString() + "; plan=" + 
existingPlan); - return existingPlan; + region.getRegionNameAsString() + "; plan=" + existingPlan); + return existingPlan; } /** @@ -1794,13 +1777,6 @@ public class AssignmentManager extends ZooKeeperListener { LOG.debug("Starting unassignment of region " + region.getRegionNameAsString() + " (offlining)"); - // Check if this region is currently assigned - if (!regionStates.isRegionAssigned(region)) { - LOG.debug("Attempted to unassign region " + - region.getRegionNameAsString() + " but it is not " + - "currently assigned anywhere"); - return; - } String encodedName = region.getEncodedName(); // Grab the state of this region and synchronize on it int versionOfClosingNode = -1; @@ -1812,8 +1788,15 @@ public class AssignmentManager extends ZooKeeperListener { if (state == null) { // Create the znode in CLOSING state try { + state = regionStates.getRegionState(region); + if (state == null || state.getServerName() == null) { + // We don't know where the region is, offline it. + // No need to send CLOSE RPC + regionOffline(region); + return; + } versionOfClosingNode = ZKAssign.createNodeClosing( - watcher, region, server.getServerName()); + watcher, region, state.getServerName()); if (versionOfClosingNode == -1) { LOG.debug("Attempting to unassign region " + region.getRegionNameAsString() + " but ZK closing node " @@ -1862,57 +1845,11 @@ public class AssignmentManager extends ZooKeeperListener { "already in transition (" + state.getState() + ", force=" + force + ")"); return; } + + unassign(region, state, versionOfClosingNode, dest, true); } finally { lock.unlock(); } - - // Send CLOSE RPC - ServerName server = state.getServerName(); - // ClosedRegionhandler can remove the server from this.regions - if (server == null) { - // delete the node. if no node exists need not bother. 
- deleteClosingOrClosedNode(region); - return; - } - - try { - // TODO: We should consider making this look more like it does for the - // region open where we catch all throwables and never abort - if (serverManager.sendRegionClose(server, state.getRegion(), - versionOfClosingNode, dest)) { - LOG.debug("Sent CLOSE to " + server + " for region " + - region.getRegionNameAsString()); - return; - } - // This never happens. Currently regionserver close always return true. - LOG.warn("Server " + server + " region CLOSE RPC returned false for " + - region.getRegionNameAsString()); - } catch (Throwable t) { - if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); - } - if (t instanceof NotServingRegionException) { - // Presume that master has stale data. Presume remote side just split. - // Presume that the split message when it comes in will fix up the master's - // in memory cluster state. - if (getZKTable().isDisablingTable(region.getTableNameAsString())) { - // Remove from the regionsinTransition map - LOG.info("While trying to recover the table " - + region.getTableNameAsString() - + " to DISABLED state the region " + region - + " was offlined but the table was in DISABLING state"); - regionStates.regionOffline(region); - deleteClosingOrClosedNode(region); - } - } else if (t instanceof RegionAlreadyInTransitionException) { - // RS is already processing this region, only need to update the timestamp - LOG.debug("update " + state + " the timestamp."); - state.updateTimestampToNow(); - } - LOG.info("Server " + server + " returned " + t + " for " + - region.getRegionNameAsString(), t); - // Presume retry or server will expire. 
- } } public void unassign(HRegionInfo region, boolean force){ @@ -1954,7 +1891,7 @@ public class AssignmentManager extends ZooKeeperListener { * @throws DeserializationException */ private boolean isSplitOrSplitting(final String path) - throws KeeperException, DeserializationException { + throws KeeperException, DeserializationException { boolean result = false; // This may fail if the SPLIT or SPLITTING znode gets cleaned up before we // can get data from it. @@ -1981,7 +1918,7 @@ public class AssignmentManager extends ZooKeeperListener { * @throws InterruptedException */ public void waitForAssignment(HRegionInfo regionInfo) - throws InterruptedException { + throws InterruptedException { while(!this.server.isStopped() && !regionStates.isRegionAssigned(regionInfo)) { // We should receive a notification, but it's @@ -2019,6 +1956,35 @@ public class AssignmentManager extends ZooKeeperListener { assign(HRegionInfo.FIRST_META_REGIONINFO, true); } + /** + * Assigns specified regions retaining assignments, if any. + *

+ * This is a synchronous call and will return once every region has been + * assigned. If anything fails, an exception is thrown + * @throws InterruptedException + * @throws IOException + */ + public void assign(Map regions) + throws IOException, InterruptedException { + if (regions == null || regions.isEmpty()) { + return; + } + List servers = serverManager.createDestinationServersList(); + if (servers == null || servers.isEmpty()) { + throw new IOException("Found no destination server to assign region(s)"); + } + + // Reuse existing assignment info + Map> bulkPlan = + balancer.retainAssignment(regions, servers); + + LOG.info("Bulk assigning " + regions.size() + " region(s) across " + + servers.size() + " server(s), retainAssignment=true"); + BulkAssigner ba = new GeneralBulkAssigner(this.server, bulkPlan, this); + ba.bulkAssign(); + LOG.info("Bulk assigning done"); + } + /** * Assigns specified regions round robin, if any. *

@@ -2050,18 +2016,6 @@ public class AssignmentManager extends ZooKeeperListener { LOG.info("Bulk assigning done"); } - // TODO: This method seems way wrong. Why would we mark a table enabled based - // off a single region? We seem to call this on bulk assign on startup which - // isn't too bad but then its also called in assign. It makes the enabled - // flag up in zk meaningless. St.Ack - private void setEnabledTable(HRegionInfo hri) { - String tableName = hri.getTableNameAsString(); - boolean isTableEnabled = this.zkTable.isEnabledTable(tableName); - if (!isTableEnabled) { - setEnabledTable(tableName); - } - } - /** * Assigns all user regions, if any exist. Used during cluster startup. *

@@ -2095,27 +2049,17 @@ public class AssignmentManager extends ZooKeeperListener { getBoolean("hbase.master.startup.retainassign", true); if (retainAssignment) { - List servers = serverManager.createDestinationServersList(); - if (servers == null || servers.isEmpty()) { - throw new IOException("Found no destination server to assign region(s)"); - } - - // Reuse existing assignment info - Map> bulkPlan = - balancer.retainAssignment(allRegions, servers); - - LOG.info("Bulk assigning " + allRegions.size() + " region(s) across " + - servers.size() + " server(s), retainAssignment=true"); - BulkAssigner ba = new GeneralBulkAssigner(this.server, bulkPlan, this); - ba.bulkAssign(); - LOG.info("Bulk assigning done"); + assign(allRegions); } else { List regions = new ArrayList(allRegions.keySet()); assign(regions); } for (HRegionInfo hri : allRegions.keySet()) { - setEnabledTable(hri); + String tableName = hri.getTableNameAsString(); + if (!zkTable.isEnabledTable(tableName)) { + setEnabledTable(tableName); + } } } @@ -2126,7 +2070,7 @@ public class AssignmentManager extends ZooKeeperListener { * @throws InterruptedException */ boolean waitUntilNoRegionsInTransition(final long timeout) - throws InterruptedException { + throws InterruptedException { // Blocks until there are no regions in transition. 
It is possible that // there // are regions in transition immediately after this returns but guarantees @@ -2301,7 +2245,7 @@ public class AssignmentManager extends ZooKeeperListener { */ private void processDeadServersAndRecoverLostRegions( Map> deadServers, List nodes) - throws IOException, KeeperException { + throws IOException, KeeperException { if (deadServers != null) { for (Map.Entry> server: deadServers.entrySet()) { ServerName serverName = server.getKey(); @@ -2314,7 +2258,7 @@ public class AssignmentManager extends ZooKeeperListener { this.watcher, this.watcher.assignmentZNode); if (!nodes.isEmpty()) { for (String encodedRegionName : nodes) { - processRegionInTransition(encodedRegionName, null, deadServers); + processRegionInTransition(encodedRegionName, null); } } @@ -2419,10 +2363,9 @@ public class AssignmentManager extends ZooKeeperListener { * Monitor to check for time outs on region transition operations */ public class TimeoutMonitor extends Chore { - private final int timeout; - private boolean bulkAssign = false; private boolean allRegionServersOffline = false; private ServerManager serverManager; + private final int timeout; /** * Creates a periodic monitor to check for time outs on region transition @@ -2441,17 +2384,6 @@ public class AssignmentManager extends ZooKeeperListener { this.serverManager = serverManager; } - /** - * @param bulkAssign If true, we'll suspend checking regions in transition - * up in zookeeper. If false, will reenable check. - * @return Old setting for bulkAssign. 
- */ - public boolean bulkAssign(final boolean bulkAssign) { - boolean result = this.bulkAssign; - this.bulkAssign = bulkAssign; - return result; - } - private synchronized void setAllRegionServersOffline( boolean allRegionServersOffline) { this.allRegionServersOffline = allRegionServersOffline; @@ -2459,21 +2391,21 @@ public class AssignmentManager extends ZooKeeperListener { @Override protected void chore() { - // If bulkAssign in progress, suspend checks - if (this.bulkAssign) return; boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty(); // Iterate all regions in transition checking for time outs long now = System.currentTimeMillis(); // no lock concurrent access ok: we will be working on a copy, and it's java-valid to do // a copy while another thread is adding/removing items - for (RegionState regionState : regionStates.getRegionsInTransition().values()) { + for (String regionName : regionStates.getRegionsInTransition().keySet()) { + RegionState regionState = regionStates.getRegionTransitionState(regionName); + if (regionState == null) continue; + if (regionState.getStamp() + timeout <= now) { // decide on action upon timeout actOnTimeOut(regionState); } else if (this.allRegionServersOffline && !noRSAvailable) { - RegionPlan existingPlan = regionPlans.get(regionState.getRegion() - .getEncodedName()); + RegionPlan existingPlan = regionPlans.get(regionName); if (existingPlan == null || !this.serverManager.isServerOnline(existingPlan .getDestination())) { @@ -2541,7 +2473,7 @@ public class AssignmentManager extends ZooKeeperListener { } private void processOpeningState(HRegionInfo regionInfo) { - LOG.info("Region has been OPENING for too " + "long, reassigning region=" + LOG.info("Region has been OPENING for too long, reassigning region=" + regionInfo.getRegionNameAsString()); // Should have a ZK node in OPENING state try { @@ -2573,7 +2505,7 @@ public class AssignmentManager extends ZooKeeperListener { return; } - private void 
invokeAssign(HRegionInfo regionInfo) { + void invokeAssign(HRegionInfo regionInfo) { threadPoolExecutorService.submit(new AssignCallable(this, regionInfo)); } @@ -2581,11 +2513,11 @@ public class AssignmentManager extends ZooKeeperListener { threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo)); } - public boolean isCarryingRoot(ServerName serverName) { + boolean isCarryingRoot(ServerName serverName) { return isCarryingRegion(serverName, HRegionInfo.ROOT_REGIONINFO); } - public boolean isCarryingMeta(ServerName serverName) { + boolean isCarryingMeta(ServerName serverName) { return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO); } @@ -2599,7 +2531,7 @@ public class AssignmentManager extends ZooKeeperListener { * processing hasn't finished yet when server shutdown occurs. * @return whether the serverName currently hosts the region */ - public boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) { + private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) { RegionTransition rt = null; try { byte [] data = ZKAssign.getData(watcher, hri.getEncodedName()); @@ -2713,4 +2645,34 @@ public class AssignmentManager extends ZooKeeperListener { this.server.abort(errorMsg, e); } } + + /** + * Set region as OFFLINED up in zookeeper asynchronously. + * @param state + * @return True if we succeeded, false otherwise (State was incorrect or failed + * updating zk). 
+ */ + private boolean asyncSetOfflineInZooKeeper(final RegionState state, + final ZooKeeperWatcher zkw, final AsyncCallback.StringCallback cb, + final ServerName destination) { + if (!state.isClosed() && !state.isOffline()) { + this.server.abort("Unexpected state trying to OFFLINE; " + state, + new IllegalStateException()); + return false; + } + regionStates.updateRegionState( + state.getRegion(), RegionState.State.OFFLINE); + try { + ZKAssign.asyncCreateNodeOffline(zkw, state.getRegion(), + destination, cb, state); + } catch (KeeperException e) { + if (e instanceof NodeExistsException) { + LOG.warn("Node for " + state.getRegion() + " already exists"); + } else { + server.abort("Unexpected ZK exception creating/setting node OFFLINE", e); + } + return false; + } + return true; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java index 242616c313d..dd5a885ea57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.master; -import java.io.IOException; import java.lang.Thread.UncaughtExceptionHandler; import java.util.ArrayList; import java.util.HashSet; @@ -60,19 +59,6 @@ public class GeneralBulkAssigner extends BulkAssigner { this.assignmentManager = am; } - @Override - public boolean bulkAssign(boolean sync) throws InterruptedException, - IOException { - // Disable timing out regions in transition up in zk while bulk assigning. - this.assignmentManager.timeoutMonitor.bulkAssign(true); - try { - return super.bulkAssign(sync); - } finally { - // Re-enable timing out regions in transition up in zk. 
- this.assignmentManager.timeoutMonitor.bulkAssign(false); - } - } - @Override protected String getThreadNamePrefix() { return this.server.getServerName() + "-GeneralBulkAssigner"; @@ -80,7 +66,7 @@ public class GeneralBulkAssigner extends BulkAssigner { @Override protected void populatePool(ExecutorService pool) { - this.pool = pool; // shut it down later in case some assigner hangs + this.pool = pool; // shut it down later in case some assigner hangs for (Map.Entry> e: this.bulkPlan.entrySet()) { pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(), this.assignmentManager, this.failedPlans)); @@ -204,7 +190,7 @@ public class GeneralBulkAssigner extends BulkAssigner { reassigningRegions.addAll(failedPlans.remove(e.getKey())); } for (HRegionInfo region : reassigningRegions) { - assignmentManager.assign(region, true, true); + assignmentManager.invokeAssign(region); } return reassigningRegions.size(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 2d271117d2d..0559efaab95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -673,6 +673,7 @@ Server { if (!masterRecovery) { this.assignmentManager.startTimeOutMonitor(); } + // TODO: Should do this in background rather than block master startup status.setStatus("Splitting logs after master startup"); splitLogAfterStartup(this.fileSystemManager); @@ -2136,12 +2137,12 @@ Server { return arr; } } - assignRegion(regionInfo); - if (cpHost != null) { - cpHost.postAssign(regionInfo); - } + assignmentManager.assign(regionInfo, true, true); + if (cpHost != null) { + cpHost.postAssign(regionInfo); + } - return arr; + return arr; } catch (IOException ioe) { throw new ServiceException(ioe); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java new file mode 100644 index 00000000000..bb985d79381 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.AsyncCallback.StringCallback; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.data.Stat; + +/** + * Callback handler for creating unassigned offline znodes + * used during bulk assign, async setting region to offline. 
+ */ +@InterfaceAudience.Private +public class OfflineCallback implements StringCallback { + private final Log LOG = LogFactory.getLog(OfflineCallback.class); + private final ExistCallback callBack; + private final ZooKeeperWatcher zkw; + private final ServerName destination; + + OfflineCallback(final RegionStates regionStates, + final ZooKeeperWatcher zkw, final ServerName destination, + final AtomicInteger counter, final Map offlineNodesVersions) { + this.callBack = new ExistCallback( + regionStates, counter, destination, offlineNodesVersions); + this.destination = destination; + this.zkw = zkw; + } + + @Override + public void processResult(int rc, String path, Object ctx, String name) { + if (rc == KeeperException.Code.NODEEXISTS.intValue()) { + LOG.warn("Node for " + path + " already exists"); + } else if (rc != 0) { + // This is result code. If non-zero, need to resubmit. + LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " + + "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2"); + this.zkw.abort("Connectionloss writing unassigned at " + path + + ", rc=" + rc, null); + return; + } + if (LOG.isDebugEnabled()) { + LOG.debug("rs=" + (RegionState)ctx + + ", server=" + this.destination.toString()); + } + // Async exists to set a watcher so we'll get triggered when + // unassigned node changes. + ZooKeeper zk = this.zkw.getRecoverableZooKeeper().getZooKeeper(); + zk.exists(path, this.zkw, callBack, ctx); + } + + /** + * Callback handler for the exists call that sets watcher on unassigned znodes. + * Used during bulk assign on startup. 
+ */ + static class ExistCallback implements StatCallback { + private final Log LOG = LogFactory.getLog(ExistCallback.class); + private final Map offlineNodesVersions; + private final RegionStates regionStates; + private final AtomicInteger counter; + private ServerName destination; + + ExistCallback(final RegionStates regionStates, + final AtomicInteger counter, ServerName destination, + final Map offlineNodesVersions) { + this.offlineNodesVersions = offlineNodesVersions; + this.regionStates = regionStates; + this.counter = counter; + this.destination = destination; + } + + @Override + public void processResult(int rc, String path, Object ctx, Stat stat) { + if (rc != 0) { + // This is result code. If non-zero, need to resubmit. + LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " + + "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2"); + return; + } + RegionState state = (RegionState)ctx; + if (LOG.isDebugEnabled()) { + LOG.debug("rs=" + state + + ", server=" + this.destination.toString()); + } + // Transition RegionState to PENDING_OPEN here in master; means we've + // sent the open. We're a little ahead of ourselves here since we've not + // yet sent out the actual open but putting this state change after the + // call to open risks our writing PENDING_OPEN after state has been moved + // to OPENING by the regionserver. 
+ HRegionInfo region = state.getRegion(); + offlineNodesVersions.put( + region.getEncodedName(), Integer.valueOf(stat.getVersion())); + regionStates.updateRegionState(region, + RegionState.State.PENDING_OPEN, destination); + + this.counter.addAndGet(1); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index fd1a39dff53..dc95896d274 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -126,6 +126,14 @@ public class RegionState implements org.apache.hadoop.io.Writable { return state == State.SPLIT; } + public boolean isPendingOpenOrOpeningOnServer(final ServerName sn) { + return isOnServer(sn) && (isPendingOpen() || isOpening()); + } + + public boolean isPendingCloseOrClosingOnServer(final ServerName sn) { + return isOnServer(sn) && (isPendingClose() || isClosing()); + } + @Override public String toString() { return "{" + region.getRegionNameAsString() @@ -234,6 +242,10 @@ public class RegionState implements org.apache.hadoop.io.Writable { return new RegionState(HRegionInfo.convert(proto.getRegionInfo()),state,proto.getStamp(),null); } + private boolean isOnServer(final ServerName sn) { + return serverName != null && serverName.equals(sn); + } + /** * @deprecated Writables are going away */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 7dbaa4eecc9..cc7f84a7e8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -205,15 +205,6 @@ public class RegionStates { return updateRegionState(hri, state, serverName); } - /** - * Update a region state. 
If it is not splitting, - * it will be put in transition if not already there. - */ - public synchronized RegionState updateRegionState( - final HRegionInfo hri, final State state, final ServerName serverName) { - return updateRegionState(hri, state, System.currentTimeMillis(), serverName); - } - /** * Update a region state. If it is not splitting, * it will be put in transition if not already there. @@ -234,15 +225,15 @@ public class RegionStates { return null; } return updateRegionState(regionInfo, state, - transition.getCreateTime(), transition.getServerName()); + transition.getServerName()); } /** * Update a region state. If it is not splitting, * it will be put in transition if not already there. */ - public synchronized RegionState updateRegionState(final HRegionInfo hri, - final State state, final long stamp, final ServerName serverName) { + public synchronized RegionState updateRegionState( + final HRegionInfo hri, final State state, final ServerName serverName) { ServerName newServerName = serverName; if (serverName != null && (state == State.CLOSED || state == State.OFFLINE)) { @@ -252,7 +243,8 @@ public class RegionStates { } String regionName = hri.getEncodedName(); - RegionState regionState = new RegionState(hri, state, stamp, newServerName); + RegionState regionState = new RegionState( + hri, state, System.currentTimeMillis(), newServerName); RegionState oldState = regionStates.put(regionName, regionState); LOG.info("Region " + hri + " transitioned from " + oldState + " to " + regionState); if (state != State.SPLITTING && (newServerName != null diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 87fe4743d61..d5f982ba1d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -60,6 +60,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.ServiceException; @@ -602,11 +603,11 @@ public class ServerManager { * Open should not fail but can if server just crashed. *

* @param server server to open a region - * @param regions regions to open + * @param regionOpenInfos info of a list of regions to open * @return a list of region opening states */ public List sendRegionOpen(ServerName server, - List regions) + List> regionOpenInfos) throws IOException { AdminProtocol admin = getServerConnection(server); if (admin == null) { @@ -615,8 +616,14 @@ public class ServerManager { return null; } - OpenRegionResponse response = ProtobufUtil.openRegion(admin, regions); - return ResponseConverter.getRegionOpeningStateList(response); + OpenRegionRequest request = + RequestConverter.buildOpenRegionRequest(regionOpenInfos); + try { + OpenRegionResponse response = admin.openRegion(null, request); + return ResponseConverter.getRegionOpeningStateList(response); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } /** @@ -634,7 +641,7 @@ public class ServerManager { * @throws IOException */ public boolean sendRegionClose(ServerName server, HRegionInfo region, - int versionOfClosingNode, ServerName dest) throws IOException { + int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException { if (server == null) throw new NullPointerException("Passed server is null"); AdminProtocol admin = getServerConnection(server); if (admin == null) { @@ -644,12 +651,12 @@ public class ServerManager { " failed because no RPC connection found to this server"); } return ProtobufUtil.closeRegion(admin, region.getRegionName(), - versionOfClosingNode, dest); + versionOfClosingNode, dest, transitionInZK); } - public boolean sendRegionClose(ServerName server, HRegionInfo region, - int versionOfClosingNode) throws IOException { - return sendRegionClose(server, region, versionOfClosingNode, null); + public boolean sendRegionClose(ServerName server, + HRegionInfo region, int versionOfClosingNode) throws IOException { + return sendRegionClose(server, region, versionOfClosingNode, null, true); } /** diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index dc89df945de..f46c87029b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -228,12 +228,7 @@ public class EnableTableHandler extends EventHandler { final HRegionInfo hri = region; pool.execute(Trace.wrap(new Runnable() { public void run() { - if (retainAssignment) { - // Already plan is populated. - assignmentManager.assign(hri, true, false, false); - } else { - assignmentManager.assign(hri, true); - } + assignmentManager.assign(hri, true); } })); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java index 54c4bea0720..475cacc6ef7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java @@ -100,8 +100,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf RegionState regionState = this.assignmentManager.getRegionStates() .getRegionTransitionState(regionInfo.getEncodedName()); boolean openedNodeDeleted = false; - if (regionState != null - && regionState.getState().equals(RegionState.State.OPEN)) { + if (regionState != null && regionState.isOpened()) { openedNodeDeleted = deleteOpenedNode(expectedVersion); if (!openedNodeDeleted) { LOG.error("The znode of region " + regionInfo.getRegionNameAsString() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 66b0ecfe460..da732e2b258 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.protobuf; +import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -32,11 +34,11 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.TreeSet; import java.util.Map.Entry; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.TreeMap; +import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DeserializationException; @@ -65,8 +67,8 @@ import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.coprocessor.Exec; import org.apache.hadoop.hbase.client.coprocessor.ExecResult; -import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.HbaseObjectWritable; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; @@ -82,7 +84,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRespo import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; @@ -108,12 +109,13 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValu import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.DeleteType; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.MutateType; import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; -import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -128,18 +130,16 @@ import org.apache.hadoop.hbase.util.Pair; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ListMultimap; - import com.google.protobuf.ByteString; import com.google.protobuf.Message; import com.google.protobuf.RpcChannel; import com.google.protobuf.Service; import com.google.protobuf.ServiceException; -import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.*; - /** * Protobufs utility. 
*/ +@SuppressWarnings("deprecation") public final class ProtobufUtil { private ProtobufUtil() { @@ -983,6 +983,7 @@ public final class ProtobufUtil { * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ + @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) throws IOException { String type = proto.getName(); @@ -1007,6 +1008,7 @@ public final class ProtobufUtil { * @param proto the protocol buffer Filter to convert * @return the converted Filter */ + @SuppressWarnings("unchecked") public static Filter toFilter(HBaseProtos.Filter proto) throws IOException { String type = proto.getName(); final byte [] value = proto.getSerializedFilter().toByteArray(); @@ -1349,6 +1351,7 @@ public final class ProtobufUtil { } } + @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) throws Exception { return (T)Methods.call(service, null, "newStub", @@ -1400,28 +1403,6 @@ public final class ProtobufUtil { } } - /** - * A helper to close a region given a region name - * using admin protocol. - * - * @param admin - * @param regionName - * @param versionOfClosingNode - * @return true if the region is closed - * @throws IOException - */ - public static boolean closeRegion(final AdminProtocol admin, - final byte[] regionName, final int versionOfClosingNode) throws IOException { - CloseRegionRequest closeRegionRequest = - RequestConverter.buildCloseRegionRequest(regionName, versionOfClosingNode); - try { - CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest); - return ResponseConverter.isClosed(response); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - /** * A helper to close a region given a region name * using admin protocol. 
@@ -1433,9 +1414,11 @@ public final class ProtobufUtil { * @throws IOException */ public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName, - final int versionOfClosingNode, final ServerName destinationServer) throws IOException { + final int versionOfClosingNode, final ServerName destinationServer, + final boolean transitionInZK) throws IOException { CloseRegionRequest closeRegionRequest = - RequestConverter.buildCloseRegionRequest(regionName, versionOfClosingNode, destinationServer); + RequestConverter.buildCloseRegionRequest( + regionName, versionOfClosingNode, destinationServer, transitionInZK); try { CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest); return ResponseConverter.isClosed(response); @@ -1462,25 +1445,6 @@ public final class ProtobufUtil { } } - /** - * A helper to open a list of regions using admin protocol. - * - * @param admin - * @param regions - * @return OpenRegionResponse - * @throws IOException - */ - public static OpenRegionResponse openRegion(final AdminProtocol admin, - final List regions) throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(regions); - try { - return admin.openRegion(null, request); - } catch (ServiceException se) { - throw getRemoteException(se); - } - } - /** * A helper to get the all the online regions on a region * server using admin protocol. 
@@ -1839,6 +1803,28 @@ public final class ProtobufUtil { return perms; } + /** + * Find the HRegion encoded name based on a region specifier + * + * @param regionSpecifier the region specifier + * @return the corresponding region's encoded name + * @throws DoNotRetryIOException if the specifier type is unsupported + */ + public static String getRegionEncodedName( + final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + byte[] value = regionSpecifier.getValue().toByteArray(); + RegionSpecifierType type = regionSpecifier.getType(); + switch (type) { + case REGION_NAME: + return HRegionInfo.encodeRegionName(value); + case ENCODED_REGION_NAME: + return Bytes.toString(value); + default: + throw new DoNotRetryIOException( + "Unsupported region specifier type: " + type); + } + } + /** * Unwraps an exception from a protobuf service into the underlying (expected) IOException. * This method will always throw an exception. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index ff513ba383e..37e44e4236f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; @@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; @@ -90,17 +92,16 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTable import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; 
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -115,6 +116,7 @@ import com.google.protobuf.ByteString; * or build components for protocol buffer requests. */ @InterfaceAudience.Private +@SuppressWarnings("deprecation") public final class RequestConverter { private RequestConverter() { @@ -612,29 +614,21 @@ public final class RequestConverter { /** * Create a protocol buffer OpenRegionRequest to open a list of regions * - * @param regions the list of regions to open + * @param regionOpenInfos info of a list of regions to open * @return a protocol buffer OpenRegionRequest */ public static OpenRegionRequest - buildOpenRegionRequest(final List regions) { + buildOpenRegionRequest(final List> regionOpenInfos) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); - for (HRegionInfo region: regions) { - builder.addRegion(HRegionInfo.convert(region)); + for (Pair regionOpenInfo: regionOpenInfos) { + Integer second = regionOpenInfo.getSecond(); + int versionOfOfflineNode = second == null ? 
-1 : second.intValue(); + builder.addOpenInfo(buildRegionOpenInfo( + regionOpenInfo.getFirst(), versionOfOfflineNode)); } return builder.build(); } - /** - * Create a protocol buffer OpenRegionRequest for a given region - * - * @param region the region to open - * @return a protocol buffer OpenRegionRequest - */ - public static OpenRegionRequest - buildOpenRegionRequest(final HRegionInfo region) { - return buildOpenRegionRequest(region, -1); - } - /** * Create a protocol buffer OpenRegionRequest for a given region * @@ -645,10 +639,7 @@ public final class RequestConverter { public static OpenRegionRequest buildOpenRegionRequest( final HRegionInfo region, final int versionOfOfflineNode) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); - builder.addRegion(HRegionInfo.convert(region)); - if (versionOfOfflineNode >= 0) { - builder.setVersionOfOfflineNode(versionOfOfflineNode); - } + builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode)); return builder.build(); } @@ -669,32 +660,15 @@ public final class RequestConverter { return builder.build(); } - /** - * Create a CloseRegionRequest for a given region name - * - * @param regionName the name of the region to close - * @param versionOfClosingNode - * the version of znode to compare when RS transitions the znode from - * CLOSING state. 
- * @return a CloseRegionRequest - */ - public static CloseRegionRequest buildCloseRegionRequest( - final byte[] regionName, final int versionOfClosingNode) { - CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.setVersionOfClosingNode(versionOfClosingNode); - return builder.build(); - } - public static CloseRegionRequest buildCloseRegionRequest( - final byte[] regionName, final int versionOfClosingNode, ServerName destinationServer) { + final byte[] regionName, final int versionOfClosingNode, + ServerName destinationServer, final boolean transitionInZK) { CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier( RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setVersionOfClosingNode(versionOfClosingNode); + builder.setTransitionInZK(transitionInZK); if (destinationServer != null){ builder.setDestinationServer(ProtobufUtil.toServerName( destinationServer) ); } @@ -1153,4 +1127,17 @@ public final class RequestConverter { return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName( ByteString.copyFrom(regionName)).build(); } + + /** + * Create a RegionOpenInfo based on given region info and version of offline node + */ + private static RegionOpenInfo buildRegionOpenInfo( + final HRegionInfo region, final int versionOfOfflineNode) { + RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); + builder.setRegion(HRegionInfo.convert(region)); + if (versionOfOfflineNode >= 0) { + builder.setVersionOfOfflineNode(versionOfOfflineNode); + } + return builder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index 0ea88e4a499..66c6cf5b3c8 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -21,13 +21,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import com.google.protobuf.RpcController; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.ServerRpcController; -import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; @@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; @@ -47,8 +45,7 @@ import org.apache.hadoop.hbase.security.access.UserPermission; import org.apache.hadoop.util.StringUtils; import com.google.protobuf.ByteString; - -import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsResponse; +import com.google.protobuf.RpcController; /** * Helper 
utility to build protocol buffer responses, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index a728b96ee29..d2dfc3cef86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -3069,19 +3069,15 @@ public final class AdminProtos { public interface OpenRegionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .RegionInfo region = 1; - java.util.List - getRegionList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index); - int getRegionCount(); - java.util.List - getRegionOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( + // repeated .OpenRegionRequest.RegionOpenInfo openInfo = 1; + java.util.List + getOpenInfoList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getOpenInfo(int index); + int getOpenInfoCount(); + java.util.List + getOpenInfoOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getOpenInfoOrBuilder( int index); - - // optional uint32 versionOfOfflineNode = 2; - boolean hasVersionOfOfflineNode(); - int getVersionOfOfflineNode(); } public static final class OpenRegionRequest extends com.google.protobuf.GeneratedMessage @@ -3111,49 +3107,576 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_OpenRegionRequest_fieldAccessorTable; } - private int bitField0_; - // repeated .RegionInfo region = 1; - public static final int REGION_FIELD_NUMBER = 1; - private java.util.List region_; - public java.util.List getRegionList() { - return region_; + public interface RegionOpenInfoOrBuilder + 
extends com.google.protobuf.MessageOrBuilder { + + // required .RegionInfo region = 1; + boolean hasRegion(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); + + // optional uint32 versionOfOfflineNode = 2; + boolean hasVersionOfOfflineNode(); + int getVersionOfOfflineNode(); } - public java.util.List - getRegionOrBuilderList() { - return region_; - } - public int getRegionCount() { - return region_.size(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index) { - return region_.get(index); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( - int index) { - return region_.get(index); + public static final class RegionOpenInfo extends + com.google.protobuf.GeneratedMessage + implements RegionOpenInfoOrBuilder { + // Use RegionOpenInfo.newBuilder() to construct. 
+ private RegionOpenInfo(Builder builder) { + super(builder); + } + private RegionOpenInfo(boolean noInit) {} + + private static final RegionOpenInfo defaultInstance; + public static RegionOpenInfo getDefaultInstance() { + return defaultInstance; + } + + public RegionOpenInfo getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_OpenRegionRequest_RegionOpenInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable; + } + + private int bitField0_; + // required .RegionInfo region = 1; + public static final int REGION_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_; + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + return region_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + return region_; + } + + // optional uint32 versionOfOfflineNode = 2; + public static final int VERSIONOFOFFLINENODE_FIELD_NUMBER = 2; + private int versionOfOfflineNode_; + public boolean hasVersionOfOfflineNode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getVersionOfOfflineNode() { + return versionOfOfflineNode_; + } + + private void initFields() { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + versionOfOfflineNode_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 
1; + + if (!hasRegion()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegion().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, region_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, versionOfOfflineNode_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, region_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, versionOfOfflineNode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo) obj; + + boolean result = true; + result = result && (hasRegion() == other.hasRegion()); + if (hasRegion()) { + result = result && getRegion() + .equals(other.getRegion()); + } + result = result && (hasVersionOfOfflineNode() == 
other.hasVersionOfOfflineNode()); + if (hasVersionOfOfflineNode()) { + result = result && (getVersionOfOfflineNode() + == other.getVersionOfOfflineNode()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegion()) { + hash = (37 * hash) + REGION_FIELD_NUMBER; + hash = (53 * hash) + getRegion().hashCode(); + } + if (hasVersionOfOfflineNode()) { + hash = (37 * hash) + VERSIONOFOFFLINENODE_FIELD_NUMBER; + hash = (53 * hash) + getVersionOfOfflineNode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static 
Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_OpenRegionRequest_RegionOpenInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + versionOfOfflineNode_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionBuilder_ == null) { + result.region_ = region_; + } else { + result.region_ = regionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.versionOfOfflineNode_ = versionOfOfflineNode_; + 
result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance()) return this; + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } + if (other.hasVersionOfOfflineNode()) { + setVersionOfOfflineNode(other.getVersionOfOfflineNode()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegion()) { + + return false; + } + if (!getRegion().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(); + if (hasRegion()) { + subBuilder.mergeFrom(getRegion()); + } + 
input.readMessage(subBuilder, extensionRegistry); + setRegion(subBuilder.buildPartial()); + break; + } + case 16: { + bitField0_ |= 0x00000002; + versionOfOfflineNode_ = input.readUInt32(); + break; + } + } + } + } + + private int bitField0_; + + // required .RegionInfo region = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; + public boolean hasRegion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { + if (regionBuilder_ == null) { + return region_; + } else { + return regionBuilder_.getMessage(); + } + } + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + region_ = value; + onChanged(); + } else { + regionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setRegion( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionBuilder_ == null) { + region_ = builderForValue.build(); + onChanged(); + } else { + regionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + region_ = + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); + } else { + region_ = value; + } + onChanged(); + } else { + regionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearRegion() { + if (regionBuilder_ == null) { + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { + if (regionBuilder_ != null) { + return regionBuilder_.getMessageOrBuilder(); + } else { + return region_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionFieldBuilder() { + if (regionBuilder_ == null) { + regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + region_, + getParentForChildren(), + isClean()); + region_ = null; + } + return regionBuilder_; + } + + // optional uint32 versionOfOfflineNode = 2; + private int versionOfOfflineNode_ ; + public boolean hasVersionOfOfflineNode() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public int getVersionOfOfflineNode() { + return versionOfOfflineNode_; + } + public Builder setVersionOfOfflineNode(int 
value) { + bitField0_ |= 0x00000002; + versionOfOfflineNode_ = value; + onChanged(); + return this; + } + public Builder clearVersionOfOfflineNode() { + bitField0_ = (bitField0_ & ~0x00000002); + versionOfOfflineNode_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:OpenRegionRequest.RegionOpenInfo) + } + + static { + defaultInstance = new RegionOpenInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:OpenRegionRequest.RegionOpenInfo) } - // optional uint32 versionOfOfflineNode = 2; - public static final int VERSIONOFOFFLINENODE_FIELD_NUMBER = 2; - private int versionOfOfflineNode_; - public boolean hasVersionOfOfflineNode() { - return ((bitField0_ & 0x00000001) == 0x00000001); + // repeated .OpenRegionRequest.RegionOpenInfo openInfo = 1; + public static final int OPENINFO_FIELD_NUMBER = 1; + private java.util.List openInfo_; + public java.util.List getOpenInfoList() { + return openInfo_; } - public int getVersionOfOfflineNode() { - return versionOfOfflineNode_; + public java.util.List + getOpenInfoOrBuilderList() { + return openInfo_; + } + public int getOpenInfoCount() { + return openInfo_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getOpenInfo(int index) { + return openInfo_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getOpenInfoOrBuilder( + int index) { + return openInfo_.get(index); } private void initFields() { - region_ = java.util.Collections.emptyList(); - versionOfOfflineNode_ = 0; + openInfo_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getRegionCount(); i++) { - if (!getRegion(i).isInitialized()) { + for (int i = 0; i < getOpenInfoCount(); i++) { + if 
(!getOpenInfo(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -3165,11 +3688,8 @@ public final class AdminProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < region_.size(); i++) { - output.writeMessage(1, region_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt32(2, versionOfOfflineNode_); + for (int i = 0; i < openInfo_.size(); i++) { + output.writeMessage(1, openInfo_.get(i)); } getUnknownFields().writeTo(output); } @@ -3180,13 +3700,9 @@ public final class AdminProtos { if (size != -1) return size; size = 0; - for (int i = 0; i < region_.size(); i++) { + for (int i = 0; i < openInfo_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, region_.get(i)); - } - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, versionOfOfflineNode_); + .computeMessageSize(1, openInfo_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3211,13 +3727,8 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest) obj; boolean result = true; - result = result && getRegionList() - .equals(other.getRegionList()); - result = result && (hasVersionOfOfflineNode() == other.hasVersionOfOfflineNode()); - if (hasVersionOfOfflineNode()) { - result = result && (getVersionOfOfflineNode() - == other.getVersionOfOfflineNode()); - } + result = result && getOpenInfoList() + .equals(other.getOpenInfoList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3227,13 +3738,9 @@ public final class AdminProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getRegionCount() > 0) { - hash = 
(37 * hash) + REGION_FIELD_NUMBER; - hash = (53 * hash) + getRegionList().hashCode(); - } - if (hasVersionOfOfflineNode()) { - hash = (37 * hash) + VERSIONOFOFFLINENODE_FIELD_NUMBER; - hash = (53 * hash) + getVersionOfOfflineNode(); + if (getOpenInfoCount() > 0) { + hash = (37 * hash) + OPENINFO_FIELD_NUMBER; + hash = (53 * hash) + getOpenInfoList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; @@ -3343,7 +3850,7 @@ public final class AdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRegionFieldBuilder(); + getOpenInfoFieldBuilder(); } } private static Builder create() { @@ -3352,14 +3859,12 @@ public final class AdminProtos { public Builder clear() { super.clear(); - if (regionBuilder_ == null) { - region_ = java.util.Collections.emptyList(); + if (openInfoBuilder_ == null) { + openInfo_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - regionBuilder_.clear(); + openInfoBuilder_.clear(); } - versionOfOfflineNode_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -3397,21 +3902,15 @@ public final class AdminProtos { public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (regionBuilder_ == null) { + if (openInfoBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { - region_ = java.util.Collections.unmodifiableList(region_); + openInfo_ = java.util.Collections.unmodifiableList(openInfo_); bitField0_ = (bitField0_ & ~0x00000001); } - result.region_ = region_; + result.openInfo_ = openInfo_; } else { - result.region_ = regionBuilder_.build(); + result.openInfo_ = openInfoBuilder_.build(); } - if (((from_bitField0_ & 
0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000001; - } - result.versionOfOfflineNode_ = versionOfOfflineNode_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -3427,42 +3926,39 @@ public final class AdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance()) return this; - if (regionBuilder_ == null) { - if (!other.region_.isEmpty()) { - if (region_.isEmpty()) { - region_ = other.region_; + if (openInfoBuilder_ == null) { + if (!other.openInfo_.isEmpty()) { + if (openInfo_.isEmpty()) { + openInfo_ = other.openInfo_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureRegionIsMutable(); - region_.addAll(other.region_); + ensureOpenInfoIsMutable(); + openInfo_.addAll(other.openInfo_); } onChanged(); } } else { - if (!other.region_.isEmpty()) { - if (regionBuilder_.isEmpty()) { - regionBuilder_.dispose(); - regionBuilder_ = null; - region_ = other.region_; + if (!other.openInfo_.isEmpty()) { + if (openInfoBuilder_.isEmpty()) { + openInfoBuilder_.dispose(); + openInfoBuilder_ = null; + openInfo_ = other.openInfo_; bitField0_ = (bitField0_ & ~0x00000001); - regionBuilder_ = + openInfoBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getRegionFieldBuilder() : null; + getOpenInfoFieldBuilder() : null; } else { - regionBuilder_.addAllMessages(other.region_); + openInfoBuilder_.addAllMessages(other.openInfo_); } } } - if (other.hasVersionOfOfflineNode()) { - setVersionOfOfflineNode(other.getVersionOfOfflineNode()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - for (int i = 0; i < getRegionCount(); i++) { - if (!getRegion(i).isInitialized()) { + for (int i = 0; i < getOpenInfoCount(); i++) { + if (!getOpenInfo(i).isInitialized()) { return false; } @@ -3494,14 +3990,9 @@ public final class AdminProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.newBuilder(); input.readMessage(subBuilder, extensionRegistry); - addRegion(subBuilder.buildPartial()); - break; - } - case 16: { - bitField0_ |= 0x00000002; - versionOfOfflineNode_ = input.readUInt32(); + addOpenInfo(subBuilder.buildPartial()); break; } } @@ -3510,211 +4001,190 @@ public final class AdminProtos { private int bitField0_; - // repeated .RegionInfo region = 1; - private java.util.List region_ = + // repeated .OpenRegionRequest.RegionOpenInfo openInfo = 1; + private java.util.List openInfo_ = java.util.Collections.emptyList(); - private void ensureRegionIsMutable() { + private void ensureOpenInfoIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - region_ = new java.util.ArrayList(region_); + openInfo_ = new java.util.ArrayList(openInfo_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder> openInfoBuilder_; - public java.util.List getRegionList() { - if (regionBuilder_ == null) { - return java.util.Collections.unmodifiableList(region_); + public java.util.List getOpenInfoList() { + if (openInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(openInfo_); } else { - return regionBuilder_.getMessageList(); + return openInfoBuilder_.getMessageList(); } } - public int getRegionCount() { - if (regionBuilder_ == null) { - return region_.size(); + public int getOpenInfoCount() { + if (openInfoBuilder_ == null) { + return openInfo_.size(); } else { - return regionBuilder_.getCount(); + return openInfoBuilder_.getCount(); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index) { - if (regionBuilder_ == null) { - return region_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo getOpenInfo(int index) { + if (openInfoBuilder_ == null) { + return openInfo_.get(index); } else { - return regionBuilder_.getMessage(index); + return openInfoBuilder_.getMessage(index); } } - public Builder setRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { - if (regionBuilder_ == null) { + public Builder setOpenInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (openInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureRegionIsMutable(); - region_.set(index, value); + 
ensureOpenInfoIsMutable(); + openInfo_.set(index, value); onChanged(); } else { - regionBuilder_.setMessage(index, value); + openInfoBuilder_.setMessage(index, value); } return this; } - public Builder setRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.set(index, builderForValue.build()); + public Builder setOpenInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (openInfoBuilder_ == null) { + ensureOpenInfoIsMutable(); + openInfo_.set(index, builderForValue.build()); onChanged(); } else { - regionBuilder_.setMessage(index, builderForValue.build()); + openInfoBuilder_.setMessage(index, builderForValue.build()); } return this; } - public Builder addRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { - if (regionBuilder_ == null) { + public Builder addOpenInfo(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (openInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureRegionIsMutable(); - region_.add(value); + ensureOpenInfoIsMutable(); + openInfo_.add(value); onChanged(); } else { - regionBuilder_.addMessage(value); + openInfoBuilder_.addMessage(value); } return this; } - public Builder addRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { - if (regionBuilder_ == null) { + public Builder addOpenInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo value) { + if (openInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureRegionIsMutable(); - region_.add(index, value); + ensureOpenInfoIsMutable(); + openInfo_.add(index, value); onChanged(); } else { - regionBuilder_.addMessage(index, value); + 
openInfoBuilder_.addMessage(index, value); } return this; } - public Builder addRegion( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.add(builderForValue.build()); + public Builder addOpenInfo( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (openInfoBuilder_ == null) { + ensureOpenInfoIsMutable(); + openInfo_.add(builderForValue.build()); onChanged(); } else { - regionBuilder_.addMessage(builderForValue.build()); + openInfoBuilder_.addMessage(builderForValue.build()); } return this; } - public Builder addRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.add(index, builderForValue.build()); + public Builder addOpenInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder builderForValue) { + if (openInfoBuilder_ == null) { + ensureOpenInfoIsMutable(); + openInfo_.add(index, builderForValue.build()); onChanged(); } else { - regionBuilder_.addMessage(index, builderForValue.build()); + openInfoBuilder_.addMessage(index, builderForValue.build()); } return this; } - public Builder addAllRegion( - java.lang.Iterable values) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - super.addAll(values, region_); + public Builder addAllOpenInfo( + java.lang.Iterable values) { + if (openInfoBuilder_ == null) { + ensureOpenInfoIsMutable(); + super.addAll(values, openInfo_); onChanged(); } else { - regionBuilder_.addAllMessages(values); + openInfoBuilder_.addAllMessages(values); } return this; } - public Builder clearRegion() { - if (regionBuilder_ == null) { - region_ = java.util.Collections.emptyList(); + public Builder clearOpenInfo() { + if (openInfoBuilder_ == null) { + openInfo_ = 
java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - regionBuilder_.clear(); + openInfoBuilder_.clear(); } return this; } - public Builder removeRegion(int index) { - if (regionBuilder_ == null) { - ensureRegionIsMutable(); - region_.remove(index); + public Builder removeOpenInfo(int index) { + if (openInfoBuilder_ == null) { + ensureOpenInfoIsMutable(); + openInfo_.remove(index); onChanged(); } else { - regionBuilder_.remove(index); + openInfoBuilder_.remove(index); } return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder getOpenInfoBuilder( int index) { - return getRegionFieldBuilder().getBuilder(index); + return getOpenInfoFieldBuilder().getBuilder(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getOpenInfoOrBuilder( int index) { - if (regionBuilder_ == null) { - return region_.get(index); } else { - return regionBuilder_.getMessageOrBuilder(index); + if (openInfoBuilder_ == null) { + return openInfo_.get(index); } else { + return openInfoBuilder_.getMessageOrBuilder(index); } } - public java.util.List - getRegionOrBuilderList() { - if (regionBuilder_ != null) { - return regionBuilder_.getMessageOrBuilderList(); + public java.util.List + getOpenInfoOrBuilderList() { + if (openInfoBuilder_ != null) { + return openInfoBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(region_); + return java.util.Collections.unmodifiableList(openInfo_); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionBuilder() { - return getRegionFieldBuilder().addBuilder( - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder addOpenInfoBuilder() { + return getOpenInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder addOpenInfoBuilder( int index) { - return getRegionFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + return getOpenInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.getDefaultInstance()); } - public java.util.List - getRegionBuilderList() { - return getRegionFieldBuilder().getBuilderList(); + public java.util.List + getOpenInfoBuilderList() { + return getOpenInfoFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> - getRegionFieldBuilder() { - if (regionBuilder_ == null) { - regionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( - region_, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder> + getOpenInfoFieldBuilder() { + if (openInfoBuilder_ == null) { + openInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder>( + openInfo_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - region_ = null; + openInfo_ = null; } - return regionBuilder_; - } - - // optional uint32 versionOfOfflineNode = 2; - private int versionOfOfflineNode_ ; - public boolean hasVersionOfOfflineNode() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getVersionOfOfflineNode() { - return versionOfOfflineNode_; - } - public Builder setVersionOfOfflineNode(int value) { - bitField0_ |= 0x00000002; - versionOfOfflineNode_ = value; - onChanged(); - return this; - } - public Builder clearVersionOfOfflineNode() { - bitField0_ = (bitField0_ & ~0x00000002); - versionOfOfflineNode_ = 0; - onChanged(); - return this; + return openInfoBuilder_; } // @@protoc_insertion_point(builder_scope:OpenRegionRequest) @@ -15753,6 +16223,11 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_OpenRegionRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_OpenRegionRequest_RegionOpenInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_OpenRegionResponse_descriptor; private static @@ -15889,69 +16364,70 @@ public final class AdminProtos { "er\022\016\n\006family\030\002 
\003(\014\")\n\024GetStoreFileRespon", "se\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegion" + "Request\":\n\027GetOnlineRegionResponse\022\037\n\nre" + - "gionInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegio" + - "nRequest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n" + - "\024versionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegi" + - "onResponse\022<\n\014openingState\030\001 \003(\0162&.OpenR" + - "egionResponse.RegionOpeningState\"H\n\022Regi" + - "onOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OP" + - "ENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegi" + - "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif", - "ier\022\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016tra" + - "nsitionInZK\030\003 \001(\010:\004true\022&\n\021destinationSe" + - "rver\030\004 \001(\0132\013.ServerName\"%\n\023CloseRegionRe" + - "sponse\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionReq" + - "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025" + - "\n\rifOlderThanTs\030\002 \001(\004\"=\n\023FlushRegionResp" + - "onse\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002" + - " \001(\010\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002" + - "(\0132\020.RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014" + - "\"\025\n\023SplitRegionResponse\"G\n\024CompactRegion", - "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" + - "r\022\r\n\005major\030\002 \001(\010\"\027\n\025CompactRegionRespons" + - "e\"1\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostS" + - "igBits\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020" + - ".WALEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntr" + - "y.WALEdit\032~\n\006WALKey\022\031\n\021encodedRegionName" + - "\030\001 \002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequence" + - 
"Number\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclust" + - "erId\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValu" + - "eBytes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALE", - "ntry.WALEdit.FamilyScope\032M\n\013FamilyScope\022" + - "\016\n\006family\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WAL" + - "Entry.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027" + - "REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION" + - "_SCOPE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequ" + - "est\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replica" + - "teWALEntryResponse\"\026\n\024RollWALWriterReque" + - "st\".\n\025RollWALWriterResponse\022\025\n\rregionToF" + - "lush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reaso" + - "n\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServ", - "erInfoRequest\"@\n\nServerInfo\022\037\n\nserverNam" + - "e\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"" + - "8\n\025GetServerInfoResponse\022\037\n\nserverInfo\030\001" + - " \002(\0132\013.ServerInfo2\371\005\n\014AdminService\022>\n\rge" + - "tRegionInfo\022\025.GetRegionInfoRequest\032\026.Get" + - "RegionInfoResponse\022;\n\014getStoreFile\022\024.Get" + - "StoreFileRequest\032\025.GetStoreFileResponse\022" + - "D\n\017getOnlineRegion\022\027.GetOnlineRegionRequ" + - "est\032\030.GetOnlineRegionResponse\0225\n\nopenReg" + - "ion\022\022.OpenRegionRequest\032\023.OpenRegionResp", - "onse\0228\n\013closeRegion\022\023.CloseRegionRequest" + - "\032\024.CloseRegionResponse\0228\n\013flushRegion\022\023." 
+ - "FlushRegionRequest\032\024.FlushRegionResponse" + - "\0228\n\013splitRegion\022\023.SplitRegionRequest\032\024.S" + - "plitRegionResponse\022>\n\rcompactRegion\022\025.Co" + - "mpactRegionRequest\032\026.CompactRegionRespon" + - "se\022J\n\021replicateWALEntry\022\031.ReplicateWALEn" + - "tryRequest\032\032.ReplicateWALEntryResponse\022>" + - "\n\rrollWALWriter\022\025.RollWALWriterRequest\032\026" + - ".RollWALWriterResponse\022>\n\rgetServerInfo\022", - "\025.GetServerInfoRequest\032\026.GetServerInfoRe" + - "sponse\0225\n\nstopServer\022\022.StopServerRequest" + - "\032\023.StopServerResponseBA\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\013AdminProtos" + - "H\001\210\001\001\240\001\001" + "gionInfo\030\001 \003(\0132\013.RegionInfo\"\225\001\n\021OpenRegi" + + "onRequest\0223\n\010openInfo\030\001 \003(\0132!.OpenRegion" + + "Request.RegionOpenInfo\032K\n\016RegionOpenInfo" + + "\022\033\n\006region\030\001 \002(\0132\013.RegionInfo\022\034\n\024version" + + "OfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegionRespon" + + "se\022<\n\014openingState\030\001 \003(\0162&.OpenRegionRes" + + "ponse.RegionOpeningState\"H\n\022RegionOpenin" + + "gState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022", + "\n\016FAILED_OPENING\020\002\"\232\001\n\022CloseRegionReques" + + "t\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\034\n\024v" + + "ersionOfClosingNode\030\002 \001(\r\022\034\n\016transitionI" + + "nZK\030\003 \001(\010:\004true\022&\n\021destinationServer\030\004 \001" + + "(\0132\013.ServerName\"%\n\023CloseRegionResponse\022\016" + + "\n\006closed\030\001 \002(\010\"M\n\022FlushRegionRequest\022 \n\006" + + "region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\rifOlde" + + "rThanTs\030\002 \001(\004\"=\n\023FlushRegionResponse\022\025\n\r" + + "lastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"J\n\022" + + "SplitRegionRequest\022 \n\006region\030\001 \002(\0132\020.Reg", + 
"ionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025\n\023Spli" + + "tRegionResponse\"G\n\024CompactRegionRequest\022" + + " \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005maj" + + "or\030\002 \001(\010\"\027\n\025CompactRegionResponse\"1\n\004UUI" + + "D\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigBits\030\002" + + " \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.WALEntr" + + "y.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntry.WALEdi" + + "t\032~\n\006WALKey\022\031\n\021encodedRegionName\030\001 \002(\014\022\021" + + "\n\ttableName\030\002 \002(\014\022\031\n\021logSequenceNumber\030\003" + + " \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclusterId\030\005 \001", + "(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValueBytes\030\001" + + " \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALEntry.WAL" + + "Edit.FamilyScope\032M\n\013FamilyScope\022\016\n\006famil" + + "y\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WALEntry.WA" + + "LEdit.ScopeType\"F\n\tScopeType\022\033\n\027REPLICAT" + + "ION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE_G" + + "LOBAL\020\001\"4\n\030ReplicateWALEntryRequest\022\030\n\005e" + + "ntry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateWALEnt" + + "ryResponse\"\026\n\024RollWALWriterRequest\".\n\025Ro" + + "llWALWriterResponse\022\025\n\rregionToFlush\030\001 \003", + "(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"" + + "\024\n\022StopServerResponse\"\026\n\024GetServerInfoRe" + + "quest\"@\n\nServerInfo\022\037\n\nserverName\030\001 \002(\0132" + + "\013.ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8\n\025GetSe" + + "rverInfoResponse\022\037\n\nserverInfo\030\001 \002(\0132\013.S" + + "erverInfo2\371\005\n\014AdminService\022>\n\rgetRegionI" + + "nfo\022\025.GetRegionInfoRequest\032\026.GetRegionIn" + + "foResponse\022;\n\014getStoreFile\022\024.GetStoreFil" + + 
"eRequest\032\025.GetStoreFileResponse\022D\n\017getOn" + + "lineRegion\022\027.GetOnlineRegionRequest\032\030.Ge", + "tOnlineRegionResponse\0225\n\nopenRegion\022\022.Op" + + "enRegionRequest\032\023.OpenRegionResponse\0228\n\013" + + "closeRegion\022\023.CloseRegionRequest\032\024.Close" + + "RegionResponse\0228\n\013flushRegion\022\023.FlushReg" + + "ionRequest\032\024.FlushRegionResponse\0228\n\013spli" + + "tRegion\022\023.SplitRegionRequest\032\024.SplitRegi" + + "onResponse\022>\n\rcompactRegion\022\025.CompactReg" + + "ionRequest\032\026.CompactRegionResponse\022J\n\021re" + + "plicateWALEntry\022\031.ReplicateWALEntryReque" + + "st\032\032.ReplicateWALEntryResponse\022>\n\rrollWA", + "LWriter\022\025.RollWALWriterRequest\032\026.RollWAL" + + "WriterResponse\022>\n\rgetServerInfo\022\025.GetSer" + + "verInfoRequest\032\026.GetServerInfoResponse\0225" + + "\n\nstopServer\022\022.StopServerRequest\032\023.StopS" + + "erverResponseBA\n*org.apache.hadoop.hbase" + + ".protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -16011,9 +16487,17 @@ public final class AdminProtos { internal_static_OpenRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_OpenRegionRequest_descriptor, - new java.lang.String[] { "Region", "VersionOfOfflineNode", }, + new java.lang.String[] { "OpenInfo", }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.Builder.class); + internal_static_OpenRegionRequest_RegionOpenInfo_descriptor = + internal_static_OpenRegionRequest_descriptor.getNestedTypes().get(0); + internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_OpenRegionRequest_RegionOpenInfo_descriptor, + new java.lang.String[] { "Region", "VersionOfOfflineNode", }, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.Builder.class); internal_static_OpenRegionResponse_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_OpenRegionResponse_fieldAccessorTable = new diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 96eac237c1a..93999c6497e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -1385,10 +1385,10 @@ public final class ZooKeeperProtos { boolean hasCreateTime(); long getCreateTime(); - // optional .ServerName originServerName = 4; - boolean hasOriginServerName(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder(); + // required .ServerName serverName = 4; + boolean hasServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); // optional bytes payload = 5; boolean hasPayload(); @@ -1453,17 +1453,17 @@ public final class ZooKeeperProtos { return createTime_; } - // optional .ServerName originServerName = 4; - public static final int ORIGINSERVERNAME_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName originServerName_; - public boolean hasOriginServerName() { + // required .ServerName serverName = 4; + public static final 
int SERVERNAME_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + public boolean hasServerName() { return ((bitField0_ & 0x00000008) == 0x00000008); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName() { - return originServerName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder() { - return originServerName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; } // optional bytes payload = 5; @@ -1480,7 +1480,7 @@ public final class ZooKeeperProtos { eventTypeCode_ = 0; regionName_ = com.google.protobuf.ByteString.EMPTY; createTime_ = 0L; - originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); payload_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; @@ -1500,11 +1500,13 @@ public final class ZooKeeperProtos { memoizedIsInitialized = 0; return false; } - if (hasOriginServerName()) { - if (!getOriginServerName().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -1523,7 +1525,7 @@ public final class ZooKeeperProtos { output.writeUInt64(3, createTime_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, originServerName_); + output.writeMessage(4, serverName_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeBytes(5, payload_); @@ -1551,7 +1553,7 @@ public 
final class ZooKeeperProtos { } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, originServerName_); + .computeMessageSize(4, serverName_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream @@ -1595,10 +1597,10 @@ public final class ZooKeeperProtos { result = result && (getCreateTime() == other.getCreateTime()); } - result = result && (hasOriginServerName() == other.hasOriginServerName()); - if (hasOriginServerName()) { - result = result && getOriginServerName() - .equals(other.getOriginServerName()); + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); } result = result && (hasPayload() == other.hasPayload()); if (hasPayload()) { @@ -1626,9 +1628,9 @@ public final class ZooKeeperProtos { hash = (37 * hash) + CREATETIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCreateTime()); } - if (hasOriginServerName()) { - hash = (37 * hash) + ORIGINSERVERNAME_FIELD_NUMBER; - hash = (53 * hash) + getOriginServerName().hashCode(); + if (hasServerName()) { + hash = (37 * hash) + SERVERNAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); } if (hasPayload()) { hash = (37 * hash) + PAYLOAD_FIELD_NUMBER; @@ -1742,7 +1744,7 @@ public final class ZooKeeperProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getOriginServerNameFieldBuilder(); + getServerNameFieldBuilder(); } } private static Builder create() { @@ -1757,10 +1759,10 @@ public final class ZooKeeperProtos { bitField0_ = (bitField0_ & ~0x00000002); createTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); - if (originServerNameBuilder_ == null) { - originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + if (serverNameBuilder_ == null) { + serverName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } else { - originServerNameBuilder_.clear(); + serverNameBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); payload_ = com.google.protobuf.ByteString.EMPTY; @@ -1818,10 +1820,10 @@ public final class ZooKeeperProtos { if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - if (originServerNameBuilder_ == null) { - result.originServerName_ = originServerName_; + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; } else { - result.originServerName_ = originServerNameBuilder_.build(); + result.serverName_ = serverNameBuilder_.build(); } if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; @@ -1852,8 +1854,8 @@ public final class ZooKeeperProtos { if (other.hasCreateTime()) { setCreateTime(other.getCreateTime()); } - if (other.hasOriginServerName()) { - mergeOriginServerName(other.getOriginServerName()); + if (other.hasServerName()) { + mergeServerName(other.getServerName()); } if (other.hasPayload()) { setPayload(other.getPayload()); @@ -1875,11 +1877,13 @@ public final class ZooKeeperProtos { return false; } - if (hasOriginServerName()) { - if (!getOriginServerName().isInitialized()) { - - return false; - } + if (!hasServerName()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; } return true; } @@ -1924,11 +1928,11 @@ public final class ZooKeeperProtos { } case 34: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); - if (hasOriginServerName()) { - subBuilder.mergeFrom(getOriginServerName()); + if (hasServerName()) { + subBuilder.mergeFrom(getServerName()); } input.readMessage(subBuilder, extensionRegistry); - setOriginServerName(subBuilder.buildPartial()); + setServerName(subBuilder.buildPartial()); break; } case 42: { @@ -2008,94 +2012,94 
@@ public final class ZooKeeperProtos { return this; } - // optional .ServerName originServerName = 4; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + // required .ServerName serverName = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> originServerNameBuilder_; - public boolean hasOriginServerName() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + public boolean hasServerName() { return ((bitField0_ & 0x00000008) == 0x00000008); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName() { - if (originServerNameBuilder_ == null) { - return originServerName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; } else { - return originServerNameBuilder_.getMessage(); + return serverNameBuilder_.getMessage(); } } - public Builder setOriginServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (originServerNameBuilder_ == null) { + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - originServerName_ = value; + serverName_ = value; 
onChanged(); } else { - originServerNameBuilder_.setMessage(value); + serverNameBuilder_.setMessage(value); } bitField0_ |= 0x00000008; return this; } - public Builder setOriginServerName( + public Builder setServerName( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (originServerNameBuilder_ == null) { - originServerName_ = builderForValue.build(); + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); onChanged(); } else { - originServerNameBuilder_.setMessage(builderForValue.build()); + serverNameBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000008; return this; } - public Builder mergeOriginServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (originServerNameBuilder_ == null) { + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { if (((bitField0_ & 0x00000008) == 0x00000008) && - originServerName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { - originServerName_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(originServerName_).mergeFrom(value).buildPartial(); + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); } else { - originServerName_ = value; + serverName_ = value; } onChanged(); } else { - originServerNameBuilder_.mergeFrom(value); + serverNameBuilder_.mergeFrom(value); } bitField0_ |= 0x00000008; return this; } - public Builder clearOriginServerName() { - if (originServerNameBuilder_ == null) { - originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + public Builder clearServerName() { + if (serverNameBuilder_ 
== null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); onChanged(); } else { - originServerNameBuilder_.clear(); + serverNameBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000008); return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getOriginServerNameBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { bitField0_ |= 0x00000008; onChanged(); - return getOriginServerNameFieldBuilder().getBuilder(); + return getServerNameFieldBuilder().getBuilder(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder() { - if (originServerNameBuilder_ != null) { - return originServerNameBuilder_.getMessageOrBuilder(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); } else { - return originServerName_; + return serverName_; } } private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getOriginServerNameFieldBuilder() { - if (originServerNameBuilder_ == null) { - originServerNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - originServerName_, + serverName_, getParentForChildren(), isClean()); - originServerName_ = null; + serverName_ = 
null; } - return originServerNameBuilder_; + return serverNameBuilder_; } // optional bytes payload = 5; @@ -4960,25 +4964,24 @@ public final class ZooKeeperProtos { "\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" + "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" + "%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" + - "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\211\001\n\020Regio" + + "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" + "nTransition\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nre" + - "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022%\n\020or" + - "iginServerName\030\004 \001(\0132\013.ServerName\022\017\n\007pay" + - "load\030\005 \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002" + - "(\0162\023.SplitLogTask.State\022\037\n\nserverName\030\002 " + - "\002(\0132\013.ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020", - "\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003" + - "ERR\020\004\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.St" + - "ate:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DI" + - "SABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"%\n" + - "\017ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\"^\n\020" + - "ReplicationState\022&\n\005state\030\001 \002(\0162\027.Replic" + - "ationState.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014" + - "\n\010DISABLED\020\001\"+\n\027ReplicationHLogPosition\022" + - "\020\n\010position\030\001 \002(\003\"$\n\017ReplicationLock\022\021\n\t" + - "lockOwner\030\001 \002(\tBE\n*org.apache.hadoop.hba", - "se.protobuf.generatedB\017ZooKeeperProtosH\001" + - "\210\001\001\240\001\001" + "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022\037\n\nse" + + "rverName\030\004 \002(\0132\013.ServerName\022\017\n\007payload\030\005" + 
+ " \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.S" + + "plitLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013." + + "ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005O", + "WNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\"" + + "n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007E" + + "NABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED" + + "\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"%\n\017Repli" + + "cationPeer\022\022\n\nclusterkey\030\001 \002(\t\"^\n\020Replic" + + "ationState\022&\n\005state\030\001 \002(\0162\027.ReplicationS" + + "tate.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" + + "BLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n\010pos" + + "ition\030\001 \002(\003\"$\n\017ReplicationLock\022\021\n\tlockOw" + + "ner\030\001 \002(\tBE\n*org.apache.hadoop.hbase.pro", + "tobuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -5014,7 +5017,7 @@ public final class ZooKeeperProtos { internal_static_RegionTransition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionTransition_descriptor, - new java.lang.String[] { "EventTypeCode", "RegionName", "CreateTime", "OriginServerName", "Payload", }, + new java.lang.String[] { "EventTypeCode", "RegionName", "CreateTime", "ServerName", "Payload", }, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class); internal_static_SplitLogTask_descriptor = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 334d2a61fc0..51e03a76437 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -135,6 +135,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRespo import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; @@ -171,7 +172,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; @@ -228,7 +228,6 @@ import org.codehaus.jackson.map.ObjectMapper; import com.google.common.base.Function; import com.google.protobuf.ByteString; -import com.google.protobuf.Message; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -2595,14 +2594,13 @@ public class HRegionServer implements 
ClientProtocol, } } - protected void checkIfRegionInTransition(HRegionInfo region, + protected void checkIfRegionInTransition(byte[] regionEncodedName, String currentAction) throws RegionAlreadyInTransitionException { - byte[] encodedName = region.getEncodedNameAsBytes(); - if (this.regionsInTransitionInRS.containsKey(encodedName)) { - boolean openAction = this.regionsInTransitionInRS.get(encodedName); + if (this.regionsInTransitionInRS.containsKey(regionEncodedName)) { + boolean openAction = this.regionsInTransitionInRS.get(regionEncodedName); // The below exception message will be used in master. throw new RegionAlreadyInTransitionException("Received:" + currentAction + - " for the region:" + region.getRegionNameAsString() + + " for the region:" + Bytes.toString(regionEncodedName) + " ,which we are already trying to " + (openAction ? OPEN : CLOSE)+ "."); } @@ -3568,12 +3566,8 @@ public class HRegionServer implements ClientProtocol, */ @Override @QosPriority(priority=HConstants.HIGH_QOS) - public OpenRegionResponse openRegion(final RpcController controller, final OpenRegionRequest request) - throws ServiceException { - int versionOfOfflineNode = -1; - if (request.hasVersionOfOfflineNode()) { - versionOfOfflineNode = request.getVersionOfOfflineNode(); - } + public OpenRegionResponse openRegion(final RpcController controller, + final OpenRegionRequest request) throws ServiceException { try { checkOpen(); } catch (IOException ie) { @@ -3581,13 +3575,18 @@ public class HRegionServer implements ClientProtocol, } requestCount.incrementAndGet(); OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder(); - Map htds = new HashMap( - request.getRegionList().size()); - boolean isBulkAssign = request.getRegionList().size() > 1; - for (RegionInfo regionInfo : request.getRegionList()) { - HRegionInfo region = HRegionInfo.convert(regionInfo); + int regionCount = request.getOpenInfoCount(); + Map htds = + new HashMap(regionCount); + boolean isBulkAssign = regionCount > 
1; + for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) { + HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion()); + int versionOfOfflineNode = -1; + if (regionOpenInfo.hasVersionOfOfflineNode()) { + versionOfOfflineNode = regionOpenInfo.getVersionOfOfflineNode(); + } try { - checkIfRegionInTransition(region, OPEN); + checkIfRegionInTransition(region.getEncodedNameAsBytes(), OPEN); HRegion onlineRegion = getFromOnlineRegions(region.getEncodedName()); if (null != onlineRegion) { // See HBASE-5094. Cross check with META if still this RS is owning @@ -3643,7 +3642,6 @@ public class HRegionServer implements ClientProtocol, } } return builder.build(); - } /** @@ -3668,17 +3666,26 @@ public class HRegionServer implements ClientProtocol, try { checkOpen(); requestCount.incrementAndGet(); - HRegion region = getRegion(request.getRegion()); - CloseRegionResponse.Builder - builder = CloseRegionResponse.newBuilder(); + String encodedRegionName = + ProtobufUtil.getRegionEncodedName(request.getRegion()); + byte[] encodedName = Bytes.toBytes(encodedRegionName); + Boolean openAction = regionsInTransitionInRS.get(encodedName); + if (openAction != null) { + if (openAction.booleanValue()) { + regionsInTransitionInRS.replace(encodedName, openAction, Boolean.FALSE); + } + checkIfRegionInTransition(encodedName, CLOSE); + } + HRegion region = getRegionByEncodedName(encodedRegionName); LOG.info("Received close region: " + region.getRegionNameAsString() + ". Version of ZK closing node:" + versionOfClosingNode + ". 
Destination server:" + sn); HRegionInfo regionInfo = region.getRegionInfo(); - checkIfRegionInTransition(regionInfo, CLOSE); + checkIfRegionInTransition(encodedName, CLOSE); boolean closed = closeRegion( regionInfo, false, zk, versionOfClosingNode, sn); - builder.setClosed(closed); + CloseRegionResponse.Builder builder = + CloseRegionResponse.newBuilder().setClosed(closed); return builder.build(); } catch (IOException ie) { throw new ServiceException(ie); @@ -3874,18 +3881,8 @@ public class HRegionServer implements ClientProtocol, */ protected HRegion getRegion( final RegionSpecifier regionSpecifier) throws IOException { - byte[] value = regionSpecifier.getValue().toByteArray(); - RegionSpecifierType type = regionSpecifier.getType(); - checkOpen(); - switch (type) { - case REGION_NAME: - return getRegion(value); - case ENCODED_REGION_NAME: - return getRegionByEncodedName(Bytes.toString(value)); - default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); - } + return getRegionByEncodedName( + ProtobufUtil.getRegionEncodedName(regionSpecifier)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index f94e52b01a7..074ef8f9031 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -134,6 +134,14 @@ public class OpenRegionHandler extends EventHandler { cleanupFailedOpen(region); return; } + + // One more check to make sure we are opening instead of closing + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + cleanupFailedOpen(region); + return; + } + // Successful region open, and add it to OnlineRegions this.rsServices.addToOnlineRegions(region); @@ -269,6 +277,10 @@ public 
class OpenRegionHandler extends EventHandler { * @throws IOException */ private boolean transitionToOpened(final HRegion r) throws IOException { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } boolean result = false; HRegionInfo hri = r.getRegionInfo(); final String name = hri.getRegionNameAsString(); @@ -364,6 +376,12 @@ public class OpenRegionHandler extends EventHandler { if (region != null) region.close(); } + private boolean isRegionStillOpening() { + byte[] encodedName = regionInfo.getEncodedNameAsBytes(); + Boolean action = rsServices.getRegionsInTransitionInRS().get(encodedName); + return action != null && action.booleanValue(); + } + /** * Transition ZK node from OFFLINE to OPENING. * @param encodedName Name of the znode file (Region encodedName is the znode @@ -374,6 +392,10 @@ public class OpenRegionHandler extends EventHandler { */ boolean transitionZookeeperOfflineToOpening(final String encodedName, int versionOfOfflineNode) { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } // TODO: should also handle transition from CLOSED? try { // Initialize the znode version. @@ -399,6 +421,10 @@ public class OpenRegionHandler extends EventHandler { * @return True if successful transition. */ boolean tickleOpening(final String context) { + if (!isRegionStillOpening()) { + LOG.warn("Open region aborted since it isn't opening any more"); + return false; + } // If previous checks failed... do not try again. 
if (!isGoodVersion()) return false; String encodedName = this.regionInfo.getEncodedName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java index 0c58543c062..c753f336a3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java @@ -19,14 +19,18 @@ package org.apache.hadoop.hbase.util; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - import java.util.HashMap; import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + /** * A utility class to manage a set of locks. Each lock is identified by a String which serves * as a key. Typical usage is:

@@ -44,7 +48,7 @@ import java.util.concurrent.locks.ReentrantLock; * } *

*/ -public class KeyLocker { +public class KeyLocker> { private static final Log LOG = LogFactory.getLog(KeyLocker.class); // The number of lock we want to easily support. It's not a maximum. @@ -78,6 +82,19 @@ public class KeyLocker { return lock.getFirst(); } + /** + * Acquire locks for a set of keys. The keys will be + * sorted internally to avoid possible deadlock. + */ + public Map acquireLocks(final Set keys) { + Map locks = new HashMap(keys.size()); + SortedSet sortedKeys = new TreeSet(keys); + for (K key : sortedKeys) { + locks.put(key, acquireLock(key)); + } + return locks; + } + /** * Free the lock for the given key. */ @@ -95,7 +112,9 @@ public class KeyLocker { } } - static class KeyLock extends ReentrantLock { + static class KeyLock> extends ReentrantLock { + private static final long serialVersionUID = -12432857283423584L; + private final KeyLocker locker; private final K lockId; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java index a267a20762a..58ada7b36a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java @@ -133,7 +133,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be created as offline - * @param serverName server event originates from + * @param serverName server transition will happen on * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.NodeExistsException if node already exists */ @@ -163,7 +163,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be created as offline - * @param serverName server event originates from + * @param serverName server transition will happen on * @param cb * @param ctx * @throws KeeperException if unexpected zookeeper exception @@ -181,35 +181,6 @@ public class ZKAssign { 
ZKUtil.asyncCreate(zkw, node, rt.toByteArray(), cb, ctx); } - /** - * Forces an existing unassigned node to the OFFLINE state for the specified - * region. - * - *

Does not create a new node. If a node does not already exist for this - * region, a {@link NoNodeException} will be thrown. - * - *

Sets a watcher on the unassigned region node if the method is - * successful. - * - *

This method should only be used during recovery of regionserver failure. - * - * @param zkw zk reference - * @param region region to be forced as offline - * @param serverName server event originates from - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.NoNodeException if node does not exist - */ - public static void forceNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region, - ServerName serverName) - throws KeeperException, KeeperException.NoNodeException { - LOG.debug(zkw.prefix("Forcing existing unassigned node for " + - region.getEncodedName() + " to OFFLINE state")); - RegionTransition rt = - RegionTransition.createRegionTransition(EventType.M_ZK_REGION_OFFLINE, region.getRegionName(), serverName); - String node = getNodeName(zkw, region.getEncodedName()); - ZKUtil.setData(zkw, node, rt.toByteArray()); - } - /** * Creates or force updates an unassigned node to the OFFLINE state for the * specified region. @@ -224,7 +195,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be created as offline - * @param serverName server event originates from + * @param serverName server transition will happen on * @return the version of the znode created in OFFLINE state, -1 if * unsuccessful. * @throws KeeperException if unexpected zookeeper exception @@ -232,76 +203,17 @@ public class ZKAssign { */ public static int createOrForceNodeOffline(ZooKeeperWatcher zkw, HRegionInfo region, ServerName serverName) throws KeeperException { - return createOrForceNodeOffline(zkw, region, serverName, false, true); - } - - /** - * Creates or force updates an unassigned node to the OFFLINE state for the - * specified region. - *

- * Attempts to create the node but if it exists will force it to transition to - * and OFFLINE state. - *

- * Sets a watcher on the unassigned region node if the method is successful. - * - *

- * This method should be used when assigning a region. - * - * @param zkw - * zk reference - * @param region - * region to be created as offline - * @param serverName - * server event originates from - * @param hijack - * - true if to be hijacked and reassigned, false otherwise - * @param allowCreation - * - true if the node has to be created newly, false otherwise - * @throws KeeperException - * if unexpected zookeeper exception - * @return the version of the znode created in OFFLINE state, -1 if - * unsuccessful. - * @throws KeeperException.NodeExistsException - * if node already exists - */ - public static int createOrForceNodeOffline(ZooKeeperWatcher zkw, - HRegionInfo region, ServerName serverName, - boolean hijack, boolean allowCreation) - throws KeeperException { LOG.debug(zkw.prefix("Creating (or updating) unassigned node for " + region.getEncodedName() + " with OFFLINE state")); RegionTransition rt = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_OFFLINE, region.getRegionName(), serverName, HConstants.EMPTY_BYTE_ARRAY); byte [] data = rt.toByteArray(); String node = getNodeName(zkw, region.getEncodedName()); - Stat stat = new Stat(); zkw.sync(node); int version = ZKUtil.checkExists(zkw, node); if (version == -1) { - // While trying to transit a node to OFFLINE that was in previously in - // OPENING state but before it could transit to OFFLINE state if RS had - // opened the region then the Master deletes the assigned region znode. - // In that case the znode will not exist. So we should not - // create the znode again which will lead to double assignment. - if (hijack && !allowCreation) { - return -1; - } return ZKUtil.createAndWatch(zkw, node, data); } else { - byte [] curDataInZNode = ZKAssign.getDataNoWatch(zkw, region.getEncodedName(), stat); - RegionTransition curRt = getRegionTransition(curDataInZNode); - // Do not move the node to OFFLINE if znode is in any of the following - // state. - // Because these are already executed states. 
- if (hijack && curRt != null) { - EventType eventType = curRt.getEventType(); - if (eventType.equals(EventType.M_ZK_REGION_CLOSING) - || eventType.equals(EventType.RS_ZK_REGION_CLOSED) - || eventType.equals(EventType.RS_ZK_REGION_OPENED)) { - return -1; - } - } - boolean setData = false; try { setData = ZKUtil.setData(zkw, node, data, version); @@ -327,7 +239,7 @@ public class ZKAssign { } } } - return stat.getVersion() + 1; + return version + 1; } /** @@ -558,7 +470,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be created as closing - * @param serverName server event originates from + * @param serverName server transition will happen on * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception * @throws KeeperException.NodeExistsException if node already exists @@ -596,7 +508,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be transitioned to closed - * @param serverName server event originates from + * @param serverName server transition happens on * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception */ @@ -630,7 +542,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be transitioned to opening - * @param serverName server event originates from + * @param serverName server transition happens on * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception */ @@ -670,7 +582,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be transitioned to opening - * @param serverName server event originates from + * @param serverName server transition happens on * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception */ @@ -706,7 +618,7 @@ 
public class ZKAssign { * * @param zkw zk reference * @param region region to be transitioned to opened - * @param serverName server event originates from + * @param serverName server transition happens on * @return version of node after transition, -1 if unsuccessful transition * @throws KeeperException if unexpected zookeeper exception */ @@ -739,7 +651,7 @@ public class ZKAssign { * * @param zkw zk reference * @param region region to be transitioned to opened - * @param serverName server event originates from + * @param serverName server transition happens on * @param endState state to transition node to if all checks pass * @param beginState state the node must currently be in to do transition * @param expectedVersion expected version of data before modification, or -1 diff --git a/hbase-server/src/main/protobuf/Admin.proto b/hbase-server/src/main/protobuf/Admin.proto index 74f29f3a99c..944bd19e187 100644 --- a/hbase-server/src/main/protobuf/Admin.proto +++ b/hbase-server/src/main/protobuf/Admin.proto @@ -64,8 +64,12 @@ message GetOnlineRegionResponse { } message OpenRegionRequest { - repeated RegionInfo region = 1; - optional uint32 versionOfOfflineNode = 2; + repeated RegionOpenInfo openInfo = 1; + + message RegionOpenInfo { + required RegionInfo region = 1; + optional uint32 versionOfOfflineNode = 2; + } } message OpenRegionResponse { diff --git a/hbase-server/src/main/protobuf/ZooKeeper.proto b/hbase-server/src/main/protobuf/ZooKeeper.proto index 50cc3b33349..69a5c01e240 100644 --- a/hbase-server/src/main/protobuf/ZooKeeper.proto +++ b/hbase-server/src/main/protobuf/ZooKeeper.proto @@ -62,7 +62,8 @@ message RegionTransition { // Full regionname in bytes required bytes regionName = 2; required uint64 createTime = 3; - optional ServerName originServerName = 4; + // The region server where the transition will happen or is happening + required ServerName serverName = 4; optional bytes payload = 5; } diff --git 
a/hbase-server/src/main/ruby/shell/commands/assign.rb b/hbase-server/src/main/ruby/shell/commands/assign.rb index de0071a68d8..9978ff8075d 100644 --- a/hbase-server/src/main/ruby/shell/commands/assign.rb +++ b/hbase-server/src/main/ruby/shell/commands/assign.rb @@ -22,9 +22,8 @@ module Shell class Assign < Command def help return <<-EOF -Assign a region.Use with caution.If region already assigned, -this command will just go ahead and reassign -the region anyways. For experts only. +Assign a region. Use with caution. If region already assigned, +this command will do a force reassign. For experts only. EOF end diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index 9d1170e380b..1edc706d675 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -192,6 +192,11 @@ public class TestAssignmentManager { createRegionPlanAndBalance(am, SERVERNAME_A, SERVERNAME_B, REGIONINFO); startFakeFailedOverMasterAssignmentManager(am, this.watcher); while (!am.processRITInvoked) Thread.sleep(1); + // As part of the failover cleanup, the balancing region plan is removed. + // So a random server will be used to open the region. For testing purpose, + // let's assume it is going to open on server b: + am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_B)); + // Now fake the region closing successfully over on the regionserver; the // regionserver will have set the region in CLOSED state. This will // trigger callback into AM. The below zk close call is from the RS close @@ -208,7 +213,7 @@ public class TestAssignmentManager { assertNotSame(-1, versionid); // This uglyness below is what the openregionhandler on RS side does. 
versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO, - SERVERNAME_A, EventType.M_ZK_REGION_OFFLINE, + SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE, EventType.RS_ZK_REGION_OPENING, versionid); assertNotSame(-1, versionid); // Move znode from OPENING to OPENED as RS does on successful open. @@ -233,6 +238,11 @@ public class TestAssignmentManager { createRegionPlanAndBalance(am, SERVERNAME_A, SERVERNAME_B, REGIONINFO); startFakeFailedOverMasterAssignmentManager(am, this.watcher); while (!am.processRITInvoked) Thread.sleep(1); + // As part of the failover cleanup, the balancing region plan is removed. + // So a random server will be used to open the region. For testing purpose, + // let's assume it is going to open on server b: + am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_B)); + // Now fake the region closing successfully over on the regionserver; the // regionserver will have set the region in CLOSED state. This will // trigger callback into AM. The below zk close call is from the RS close @@ -250,7 +260,7 @@ public class TestAssignmentManager { assertNotSame(-1, versionid); // This uglyness below is what the openregionhandler on RS side does. versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO, - SERVERNAME_A, EventType.M_ZK_REGION_OFFLINE, + SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE, EventType.RS_ZK_REGION_OPENING, versionid); assertNotSame(-1, versionid); // Move znode from OPENING to OPENED as RS does on successful open. @@ -275,6 +285,11 @@ public class TestAssignmentManager { createRegionPlanAndBalance(am, SERVERNAME_A, SERVERNAME_B, REGIONINFO); startFakeFailedOverMasterAssignmentManager(am, this.watcher); while (!am.processRITInvoked) Thread.sleep(1); + // As part of the failover cleanup, the balancing region plan is removed. + // So a random server will be used to open the region. 
For testing purpose, + // let's assume it is going to open on server b: + am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_B)); + // Now fake the region closing successfully over on the regionserver; the // regionserver will have set the region in CLOSED state. This will // trigger callback into AM. The below zk close call is from the RS close @@ -292,7 +307,7 @@ public class TestAssignmentManager { assertNotSame(-1, versionid); // This uglyness below is what the openregionhandler on RS side does. versionid = ZKAssign.transitionNode(server.getZooKeeper(), REGIONINFO, - SERVERNAME_A, EventType.M_ZK_REGION_OFFLINE, + SERVERNAME_B, EventType.M_ZK_REGION_OFFLINE, EventType.RS_ZK_REGION_OPENING, versionid); assertNotSame(-1, versionid); // Move znode from OPENING to OPENED as RS does on successful open. @@ -798,12 +813,11 @@ public class TestAssignmentManager { EventType.RS_ZK_REGION_OPENING, version); RegionTransition rt = RegionTransition.createRegionTransition(EventType.RS_ZK_REGION_OPENING, REGIONINFO.getRegionName(), SERVERNAME_A, HConstants.EMPTY_BYTE_ARRAY); - Map> deadServers = - new HashMap>(); - deadServers.put(SERVERNAME_A, null); version = ZKAssign.getVersion(this.watcher, REGIONINFO); + Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(false); + am.getRegionStates().createRegionState(REGIONINFO); am.gate.set(false); - am.processRegionsInTransition(rt, REGIONINFO, deadServers, version); + am.processRegionsInTransition(rt, REGIONINFO, version); // Waiting for the assignment to get completed. 
while (!am.gate.get()) { Thread.sleep(10); @@ -1017,22 +1031,18 @@ public class TestAssignmentManager { @Override boolean processRegionInTransition(String encodedRegionName, - HRegionInfo regionInfo, - Map> deadServers) - throws KeeperException, IOException { + HRegionInfo regionInfo) throws KeeperException, IOException { this.processRITInvoked = true; - return super.processRegionInTransition(encodedRegionName, regionInfo, - deadServers); + return super.processRegionInTransition(encodedRegionName, regionInfo); } @Override - public void assign(HRegionInfo region, boolean setOfflineInZK, boolean forceNewPlan, - boolean hijack) { + public void assign(HRegionInfo region, boolean setOfflineInZK, boolean forceNewPlan) { if (enabling) { assignmentCount++; this.regionOnline(region, SERVERNAME_A); } else { - super.assign(region, setOfflineInZK, forceNewPlan, hijack); + super.assign(region, setOfflineInZK, forceNewPlan); this.gate.set(true); } } @@ -1097,5 +1107,4 @@ public class TestAssignmentManager { t.start(); while (!t.isAlive()) Threads.sleep(1); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java index 34e8ad79fde..8390544e5a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java @@ -210,6 +210,7 @@ public class TestCloseRegionHandler { // Create it OFFLINE node, which is what Master set before sending OPEN RPC ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName()); OpenRegionHandler openHandler = new OpenRegionHandler(server, rss, hri, htd); + rss.getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE); openHandler.process(); // This parse is not used? 
RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java index 6bc77e8b3a5..aa18cb723a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java @@ -114,9 +114,13 @@ public class TestOpenRegionHandler { return region; } }; + rss.getRegionsInTransitionInRS().put( + hri.getEncodedNameAsBytes(), Boolean.TRUE); // Call process without first creating OFFLINE region in zk, see if // exception or just quiet return (expected). handler.process(); + rss.getRegionsInTransitionInRS().put( + hri.getEncodedNameAsBytes(), Boolean.TRUE); ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName()); // Call process again but this time yank the zk znode out from under it // post OPENING; again will expect it to come back w/o NPE or exception. @@ -143,6 +147,8 @@ public class TestOpenRegionHandler { return null; } }; + rsServices.getRegionsInTransitionInRS().put( + TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); // Handler should have transitioned it to FAILED_OPEN @@ -168,6 +174,8 @@ public class TestOpenRegionHandler { return false; } }; + rsServices.getRegionsInTransitionInRS().put( + TEST_HRI.getEncodedNameAsBytes(), Boolean.TRUE); handler.process(); // Handler should have transitioned it to FAILED_OPEN