HBASE-478 offlining of table does not run reliably
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654193 13f79535-47bb-0310-9956-ffa450edef68
parent cfefc60bac
commit b3f4ddc1bc

@@ -33,6 +33,7 @@ Hbase Change Log
    HBASE-405 TIF and TOF use log4j directly rather than apache commons-logging
    HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
    HBASE-619 Fix 'logs' link in UI
+   HBASE-478 offlining of table does not run reliably
 
  IMPROVEMENTS
    HBASE-559 MR example job to count table rows

@@ -357,10 +357,10 @@ abstract class BaseScanner extends Chore implements HConstants {
 
     // Skip region - if ...
     if(info.isOffline()                                   // offline
-        || regionManager.isClosing(info.getRegionName()) // queued for offline
-        || regionManager.isMarkedForDeletion(info.getRegionName())) { // queued for delete
+        || regionManager.isClosing(info.getRegionName())) { // queued for offline
 
       regionManager.noLongerUnassigned(info);
+      regionManager.noLongerPending(info.getRegionName());
       return;
     }
     HServerInfo storedInfo = null;

@@ -71,8 +71,8 @@ class ChangeTableState extends TableOperation {
     for (HRegionInfo i: unservedRegions) {
       if (i.isOffline() && i.isSplit()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Skipping region " + i.toString() + " because it is " +
-            "offline because it has been split");
+          LOG.debug("Skipping region " + i.toString() +
+            " because it is offline because it has been split");
         }
         continue;
       }

@@ -94,6 +94,7 @@ class ChangeTableState extends TableOperation {
 
     if (online) {
       // Bring offline regions on-line
+      master.regionManager.noLongerClosing(i.getRegionName());
       if (!master.regionManager.isUnassigned(i)) {
         master.regionManager.setUnassigned(i);
       }

@@ -119,23 +120,21 @@ class ChangeTableState extends TableOperation {
 
       HashMap<Text, HRegionInfo> localKillList =
         new HashMap<Text, HRegionInfo>();
 
-      Map<Text, HRegionInfo> killedRegions =
-        master.regionManager.getMarkedToClose(serverName);
-      if (killedRegions != null) {
-        localKillList.putAll(killedRegions);
-      }
-
-
       for (HRegionInfo i: e.getValue()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("adding region " + i.getRegionName() +
-            " to kill list");
+          LOG.debug("adding region " + i.getRegionName() + " to kill list");
         }
         // this marks the regions to be closed
         localKillList.put(i.getRegionName(), i);
+        // this marks the regions to be offlined once they are closed
+        master.regionManager.markRegionForOffline(i.getRegionName());
       }
+      Map<Text, HRegionInfo> killedRegions =
+        master.regionManager.removeMarkedToClose(serverName);
+      if (killedRegions != null) {
+        localKillList.putAll(killedRegions);
+      }
       if (localKillList.size() > 0) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("inserted local kill list into kill list for server " +

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master;
 import java.util.Map;
 import java.io.IOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.InvalidColumnNameException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.io.Text;

@@ -51,9 +52,8 @@ class ModifyColumn extends ColumnOperation {
       if (families.get(columnName) != null){
         families.put(columnName, descriptor);
         updateRegionInfo(server, m.getRegionName(), i);
-      }
-      else{ // otherwise, we have an error.
-        throw new IOException("Column family '" + columnName +
+      } else{ // otherwise, we have an error.
+        throw new InvalidColumnNameException("Column family '" + columnName +
           "' doesn't exist, so cannot be modified.");
       }
     }

@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.HRegionInfo;
 

@@ -35,28 +34,25 @@ import org.apache.hadoop.hbase.HRegionInfo;
  */
 class ProcessRegionClose extends ProcessRegionStatusChange {
   protected final boolean offlineRegion;
-  protected final boolean deleteRegion;
 
   /**
    * @param master
    * @param regionInfo Region to operate on
    * @param offlineRegion if true, set the region to offline in meta
-   * @param deleteRegion if true, delete the region row from meta and then
-   * delete the region files from disk.
    */
   public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
-    boolean offlineRegion, boolean deleteRegion) {
+    boolean offlineRegion) {
 
     super(master, regionInfo);
     this.offlineRegion = offlineRegion;
-    this.deleteRegion = deleteRegion;
   }
 
   /** {@inheritDoc} */
   @Override
   public String toString() {
     return "ProcessRegionClose of " + this.regionInfo.getRegionName() +
-      ", " + this.offlineRegion + ", " + this.deleteRegion;
+      ", " + this.offlineRegion;
   }
 
   @Override

@@ -76,10 +72,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
           return true;
         }
 
-        if (deleteRegion) {
-          HRegion.removeRegionFromMETA(server, metaRegionName,
-            regionInfo.getRegionName());
-        } else if (offlineRegion) {
+        if (offlineRegion) {
           // offline the region in meta and then note that we've offlined the
           // region.
           HRegion.offlineRegionInMETA(server, metaRegionName,

@@ -90,17 +83,6 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
         }
       }.doWithRetries();
 
-    // now that meta is updated, if we need to delete the region's files, now's
-    // the time.
-    if (deleteRegion) {
-      try {
-        HRegion.deleteRegion(master.fs, master.rootdir, regionInfo);
-      } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
-        LOG.error("failed delete region " + regionInfo.getRegionName(), e);
-        throw e;
-      }
-    }
     return result == null ? true : result;
   }
 }

@@ -52,13 +52,11 @@ class ProcessServerShutdown extends RegionServerOperation {
   private boolean rootRescanned;
 
   private class ToDoEntry {
-    boolean deleteRegion;
     boolean regionOffline;
     Text row;
     HRegionInfo info;
 
     ToDoEntry(Text row, HRegionInfo info) {
-      this.deleteRegion = false;
       this.regionOffline = false;
       this.row = row;
       this.info = info;

@@ -154,22 +152,14 @@ class ProcessServerShutdown extends RegionServerOperation {
         if (master.regionManager.isMarkedToClose(deadServerName, info.getRegionName())) {
           master.regionManager.noLongerMarkedToClose(deadServerName, info.getRegionName());
           master.regionManager.noLongerUnassigned(info);
-          if (master.regionManager.isMarkedForDeletion(info.getRegionName())) {
-            // Delete this region
-            master.regionManager.regionDeleted(info.getRegionName());
-            todo.deleteRegion = true;
-          } else {
-            // Mark region offline
-            todo.regionOffline = true;
-          }
+          // Mark region offline
+          todo.regionOffline = true;
         } else {
           // Get region reassigned
           regions.add(info);
-
-          // If it was pending, remove.
-          // Otherwise will obstruct its getting reassigned.
-          master.regionManager.noLongerPending(info.getRegionName());
         }
+        // If it was pending, remove.
+        master.regionManager.noLongerPending(info.getRegionName());
       }
     } finally {
       if(scannerId != -1L) {

@@ -192,9 +182,7 @@ class ProcessServerShutdown extends RegionServerOperation {
     }
     // Update server in root/meta entries
     for (ToDoEntry e: toDoList) {
-      if (e.deleteRegion) {
-        HRegion.removeRegionFromMETA(server, regionName, e.row);
-      } else if (e.regionOffline) {
+      if (e.regionOffline) {
         HRegion.offlineRegionInMETA(server, regionName, e.info);
       }
     }

@@ -314,6 +302,7 @@ class ProcessServerShutdown extends RegionServerOperation {
           r.getRegionName() + " on " + r.getServer());
       }
     }
+    master.regionManager.allRegionsClosed(deadServerName);
     master.serverManager.removeDeadServer(deadServerName);
     return true;
   }

@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.util.Writables;
  * Class to manage assigning regions to servers, state of root and meta, etc.
  */
 class RegionManager implements HConstants {
-  protected static final Log LOG = LogFactory.getLog(RegionManager.class.getName());
+  protected static final Log LOG = LogFactory.getLog(RegionManager.class);
 
   private volatile AtomicReference<HServerAddress> rootRegionLocation =
     new AtomicReference<HServerAddress>(null);

@@ -103,13 +103,6 @@ class RegionManager implements HConstants {
   private final Set<Text> closingRegions =
     Collections.synchronizedSet(new HashSet<Text>());
 
-  /**
-   * 'regionsToDelete' contains regions that need to be deleted, but cannot be
-   * until the region server closes it
-   */
-  private final Set<Text> regionsToDelete =
-    Collections.synchronizedSet(new HashSet<Text>());
-
   /**
    * Set of regions that, once closed, should be marked as offline so that they
    * are not reassigned.

@@ -119,7 +112,7 @@ class RegionManager implements HConstants {
   // How many regions to assign a server at a time.
   private final int maxAssignInOneGo;
 
-  private HMaster master;
+  private final HMaster master;
 
   RegionManager(HMaster master) {
     this.master = master;

@@ -179,7 +172,7 @@ class RegionManager implements HConstants {
     // figure out what regions need to be assigned and aren't currently being
     // worked on elsewhere.
     Set<HRegionInfo> regionsToAssign = regionsAwaitingAssignment();
-    
+
     if (regionsToAssign.size() == 0) {
       // There are no regions waiting to be assigned. This is an opportunity
       // for us to check if this server is overloaded.

@@ -199,8 +192,8 @@ class RegionManager implements HConstants {
     } else {
       // otherwise, give this server a few regions taking into account the
       // load of all the other servers.
-      assignRegionsToMultipleServers(thisServersLoad, regionsToAssign,
-        serverName, returnMsgs);
+      assignRegionsToMultipleServers(thisServersLoad, regionsToAssign,
+          serverName, returnMsgs);
     }
   }
 }

@@ -320,19 +313,21 @@ class RegionManager implements HConstants {
 
     // Look over the set of regions that aren't currently assigned to
     // determine which we should assign to this server.
-    for (Map.Entry<HRegionInfo, Long> e: unassignedRegions.entrySet()) {
-      HRegionInfo i = e.getKey();
-      if (numberOfMetaRegions.get() != onlineMetaRegions.size() &&
-        !i.isMetaRegion()) {
-        // Can't assign user regions until all meta regions have been assigned
-        // and are on-line
-        continue;
-      }
-      // If the last attempt to open this region was pretty recent, then we
-      // don't want to try and assign it.
-      long diff = now - e.getValue().longValue();
-      if (diff > master.maxRegionOpenTime) {
-        regionsToAssign.add(e.getKey());
+    synchronized (unassignedRegions) { //must synchronize when iterating
+      for (Map.Entry<HRegionInfo, Long> e: unassignedRegions.entrySet()) {
+        HRegionInfo i = e.getKey();
+        if (numberOfMetaRegions.get() != onlineMetaRegions.size() &&
+            !i.isMetaRegion()) {
+          // Can't assign user regions until all meta regions have been assigned
+          // and are on-line
+          continue;
+        }
+        // If the last attempt to open this region was pretty recent, then we
+        // don't want to try and assign it.
+        long diff = now - e.getValue().longValue();
+        if (diff > master.maxRegionOpenTime) {
+          regionsToAssign.add(e.getKey());
+        }
       }
     }
     return regionsToAssign;

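Note on the regionsAwaitingAssignment change above: Collections.synchronizedMap only makes individual calls atomic; iterating the entry set still requires holding the map's own monitor, or a concurrent put from another thread can throw ConcurrentModificationException mid-scan. A minimal standalone sketch of the idiom (names hypothetical, not HBase code):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class SynchronizedIteration {
      // The wrapper makes single get/put calls atomic, but NOT iteration.
      private final Map<String, Long> unassigned =
          Collections.synchronizedMap(new HashMap<String, Long>());

      public long countOverdue(long now, long maxOpenTime) {
        long overdue = 0;
        // Per the Collections.synchronizedMap javadoc, the caller must hold
        // the map's monitor while iterating any of its collection views.
        synchronized (unassigned) {
          for (Long lastAttempt : unassigned.values()) {
            if (now - lastAttempt.longValue() > maxOpenTime) {
              overdue++;
            }
          }
        }
        return overdue;
      }
    }
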
@@ -434,7 +429,9 @@ class RegionManager implements HConstants {
    * @return Read-only map of online regions.
    */
   public Map<Text, MetaRegion> getOnlineMetaRegions() {
-    return Collections.unmodifiableSortedMap(onlineMetaRegions);
+    synchronized (onlineMetaRegions) {
+      return new TreeMap<Text, MetaRegion>(onlineMetaRegions);
+    }
   }
 
   /**

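For context on getOnlineMetaRegions: Collections.unmodifiableSortedMap returns a live, read-only view still backed by the shared map, so callers iterating the view race with the master's own updates; copying under the map's lock hands out an isolated snapshot instead. A hedged sketch of the two shapes (class and field names hypothetical):

    import java.util.Collections;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class SnapshotVsView {
      private final SortedMap<String, String> online =
          Collections.synchronizedSortedMap(new TreeMap<String, String>());

      // Old shape: read-only, but still a window onto the mutating map.
      public SortedMap<String, String> view() {
        return Collections.unmodifiableSortedMap(online);
      }

      // New shape: copied under the lock; safe to iterate without locking.
      public SortedMap<String, String> snapshot() {
        synchronized (online) {
          return new TreeMap<String, String>(online);
        }
      }
    }
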
@@ -695,18 +692,27 @@ class RegionManager implements HConstants {
    * @param map map of region names to region infos of regions to close
    */
   public void markToCloseBulk(String serverName,
-    Map<Text, HRegionInfo> map) {
-    regionsToClose.put(serverName, map);
+      Map<Text, HRegionInfo> map) {
+    synchronized (regionsToClose) {
+      Map<Text, HRegionInfo> regions = regionsToClose.get(serverName);
+      if (regions != null) {
+        regions.putAll(map);
+      } else {
+        regions = map;
+      }
+      regionsToClose.put(serverName, regions);
+    }
   }
 
   /**
-   * Get a map of region names to region infos waiting to be offlined for a
-   * given server
+   * Remove the map of region names to region infos waiting to be offlined for a
+   * given server
    *
    * @param serverName
    * @return map of region names to region infos to close
    */
-  public Map<Text, HRegionInfo> getMarkedToClose(String serverName) {
-    return regionsToClose.get(serverName);
+  public Map<Text, HRegionInfo> removeMarkedToClose(String serverName) {
+    return regionsToClose.remove(serverName);
   }
 
   /**

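The markToCloseBulk rewrite above is the heart of the fix: the old body overwrote whatever close batch was already queued for the server, silently dropping regions marked earlier, and getMarkedToClose let two callers consume the same batch. Merging under the lock, and renaming the getter to an atomic removeMarkedToClose, closes both holes. A simplified sketch of the merge-and-drain idiom (plain String keys stand in for Text/HRegionInfo):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class CloseQueue {
      private final Map<String, Map<String, String>> regionsToClose =
          Collections.synchronizedMap(new HashMap<String, Map<String, String>>());

      // Merge 'batch' into whatever is already queued for this server;
      // get + putAll + put is compound, so it needs one critical section.
      public void markToCloseBulk(String serverName, Map<String, String> batch) {
        synchronized (regionsToClose) {
          Map<String, String> queued = regionsToClose.get(serverName);
          if (queued != null) {
            queued.putAll(batch);
          } else {
            queued = batch;
          }
          regionsToClose.put(serverName, queued);
        }
      }

      // Atomically hand the queued batch to exactly one caller.
      public Map<String, String> removeMarkedToClose(String serverName) {
        return regionsToClose.remove(serverName);
      }
    }
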
@@ -737,6 +743,15 @@ class RegionManager implements HConstants {
     }
   }
 
+  /**
+   * Called when all regions for a particular server have been closed
+   *
+   * @param serverName
+   */
+  public void allRegionsClosed(String serverName) {
+    regionsToClose.remove(serverName);
+  }
+
   /**
    * Check if a region is closing
    * @param regionName

@@ -745,7 +760,7 @@ class RegionManager implements HConstants {
   public boolean isClosing(Text regionName) {
     return closingRegions.contains(regionName);
   }
-  
+
   /**
    * Set a region as no longer closing (closed?)
    * @param regionName

@@ -771,22 +786,6 @@ class RegionManager implements HConstants {
     metaScannerThread.addMetaRegionToScan(m);
   }
 
-  /**
-   * Mark a region as to be deleted
-   * @param regionName
-   */
-  public void markRegionForDeletion(Text regionName) {
-    regionsToDelete.add(regionName);
-  }
-
-  /**
-   * Note that a region to delete has been deleted
-   * @param regionName
-   */
-  public void regionDeleted(Text regionName) {
-    regionsToDelete.remove(regionName);
-  }
-
   /**
    * Note that a region should be offlined as soon as its closed.
    * @param regionName

@@ -812,15 +811,6 @@ class RegionManager implements HConstants {
     regionsToOffline.remove(regionName);
   }
 
-  /**
-   * Check if a region is marked for deletion
-   * @param regionName
-   * @return true if marked for deletion, false otherwise
-   */
-  public boolean isMarkedForDeletion(Text regionName) {
-    return regionsToDelete.contains(regionName);
-  }
-
   /**
    * Check if the initial root scan has been completed.
    * @return true if scan completed, false otherwise

@@ -28,7 +28,10 @@ import java.util.concurrent.Callable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.InvalidColumnNameException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
 /**

@@ -58,6 +61,11 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
         this.server = master.connection.getHRegionConnection(m.getServer());
         return this.call();
       } catch (IOException e) {
+        if (e instanceof TableNotFoundException ||
+            e instanceof TableNotDisabledException ||
+            e instanceof InvalidColumnNameException) {
+          throw e;
+        }
         if (e instanceof RemoteException) {
           e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
         }

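The new instanceof checks make RetryableMetaOperation fail fast: TableNotFoundException, TableNotDisabledException, and InvalidColumnNameException describe conditions no retry can cure, so looping on them would only delay the client's error (which is why ModifyColumn above and TableOperation further down now throw typed exceptions instead of bare IOException). A generic sketch of the retry-versus-rethrow split (a toy Callable, not the actual class):

    import java.io.IOException;
    import java.util.concurrent.Callable;

    public abstract class Retryable<T> implements Callable<T> {
      private static final int NUM_RETRIES = 3;

      protected T doWithRetries() throws IOException {
        IOException last = null;
        for (int tries = 0; tries < NUM_RETRIES; tries++) {
          try {
            return call();
          } catch (IOException e) {
            if (isFatal(e)) {
              throw e;   // retrying cannot fix a nonexistent table, etc.
            }
            last = e;    // transient fault: remember it and go around again
          } catch (Exception e) {
            throw new IOException(e.toString());
          }
        }
        throw last;
      }

      // Subclasses decide which failures are permanent.
      protected abstract boolean isFatal(IOException e);
    }
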
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;

@@ -101,10 +102,12 @@ class ServerManager implements HConstants {
     if (load != null) {
       // The startup message was from a known server.
       // Remove stale information about the server's load.
-      Set<String> servers = loadToServers.get(load);
-      if (servers != null) {
-        servers.remove(s);
-        loadToServers.put(load, servers);
+      synchronized (loadToServers) {
+        Set<String> servers = loadToServers.get(load);
+        if (servers != null) {
+          servers.remove(s);
+          loadToServers.put(load, servers);
+        }
       }
     }
 

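The same pattern recurs throughout ServerManager: every read-modify-write of loadToServers (fetch the set, mutate it, put it back) now sits in one synchronized block. Without it, two heartbeat threads can interleave between the get and the put and one update is lost, exactly the kind of stale bookkeeping that made offlining unreliable. A compact illustration of the lost-update hazard (hypothetical names):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class LoadIndex {
      private final Map<Integer, Set<String>> loadToServers =
          Collections.synchronizedMap(new HashMap<Integer, Set<String>>());

      // Unsafe: thread B's record() can run between get() and put(),
      // and thread A's put() then overwrites B's addition.
      public void recordUnsafe(int load, String server) {
        Set<String> servers = loadToServers.get(load);
        if (servers == null) {
          servers = new HashSet<String>();
        }
        servers.add(server);
        loadToServers.put(load, servers);
      }

      // Safe: the whole read-modify-write is a single critical section.
      public void record(int load, String server) {
        synchronized (loadToServers) {
          Set<String> servers = loadToServers.get(load);
          if (servers == null) {
            servers = new HashSet<String>();
          }
          servers.add(server);
          loadToServers.put(load, servers);
        }
      }
    }
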
@@ -124,12 +127,14 @@ class ServerManager implements HConstants {
     serverInfo.setLoad(load);
     serversToServerInfo.put(s, serverInfo);
     serversToLoad.put(s, load);
-    Set<String> servers = loadToServers.get(load);
-    if (servers == null) {
-      servers = new HashSet<String>();
+    synchronized (loadToServers) {
+      Set<String> servers = loadToServers.get(load);
+      if (servers == null) {
+        servers = new HashSet<String>();
+      }
+      servers.add(s);
+      loadToServers.put(load, servers);
     }
-    servers.add(s);
-    loadToServers.put(load, servers);
   }
 
   /**

@@ -247,7 +252,10 @@ class ServerManager implements HConstants {
           master.regionManager.offlineMetaRegion(info.getStartKey());
         }
 
-        master.regionManager.setUnassigned(info);
+        if (!master.regionManager.isMarkedToClose(
+            serverName, info.getRegionName())) {
+          master.regionManager.setUnassigned(info);
+        }
       }
     }
   }

@@ -277,24 +285,27 @@ class ServerManager implements HConstants {
     if (load != null && !load.equals(serverInfo.getLoad())) {
       // We have previous information about the load on this server
       // and the load on this server has changed
-      Set<String> servers = loadToServers.get(load);
+      synchronized (loadToServers) {
+        Set<String> servers = loadToServers.get(load);
 
-      // Note that servers should never be null because loadToServers
-      // and serversToLoad are manipulated in pairs
-      servers.remove(serverName);
-      loadToServers.put(load, servers);
+        // Note that servers should never be null because loadToServers
+        // and serversToLoad are manipulated in pairs
+        servers.remove(serverName);
+        loadToServers.put(load, servers);
+      }
     }
 
     // Set the current load information
     load = serverInfo.getLoad();
     serversToLoad.put(serverName, load);
-    Set<String> servers = loadToServers.get(load);
-    if (servers == null) {
-      servers = new HashSet<String>();
+    synchronized (loadToServers) {
+      Set<String> servers = loadToServers.get(load);
+      if (servers == null) {
+        servers = new HashSet<String>();
+      }
+      servers.add(serverName);
+      loadToServers.put(load, servers);
     }
-    servers.add(serverName);
-    loadToServers.put(load, servers);
 
     // Next, process messages for this server
     return processMsgs(serverName, serverInfo, mostLoadedRegions, msgs);

@@ -310,7 +321,7 @@ class ServerManager implements HConstants {
     throws IOException {
     ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>();
     Map<Text, HRegionInfo> regionsToKill =
-      master.regionManager.getMarkedToClose(serverName);
+      master.regionManager.removeMarkedToClose(serverName);
 
     // Get reports on what the RegionServer did.
     for (int i = 0; i < incomingMsgs.length; i++) {

@@ -351,7 +362,6 @@ class ServerManager implements HConstants {
           returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i));
           // Transition the region from toClose to closing state
           master.regionManager.setClosing(i.getRegionName());
-          master.regionManager.noLongerMarkedToClose(serverName, i.getRegionName());
         }
       }
 

@@ -475,27 +485,19 @@ class ServerManager implements HConstants {
 
     } else {
       boolean reassignRegion = !region.isOffline();
-      boolean deleteRegion = false;
       boolean offlineRegion = false;
 
-
       // either this region is being closed because it was marked to close, or
       // the region server is going down peacefully. in either case, we should
       // at least try to remove it from the closing list.
       master.regionManager.noLongerClosing(region.getRegionName());
 
-      // if the region is marked to be offlined, we don't want to reassign
-      // it.
+      // if the region is marked to be offlined, we don't want to reassign it.
       if (master.regionManager.isMarkedForOffline(region.getRegionName())) {
         reassignRegion = false;
         offlineRegion = true;
       }
 
-      if (master.regionManager.isMarkedForDeletion(region.getRegionName())) {
-        master.regionManager.regionDeleted(region.getRegionName());
-        reassignRegion = false;
-        deleteRegion = true;
-      }
-
       if (region.isMetaTable()) {
         // Region is part of the meta table. Remove it from onlineMetaRegions
         master.regionManager.offlineMetaRegion(region.getStartKey());

@@ -513,10 +515,10 @@ class ServerManager implements HConstants {
       // operations asynchronously, so we'll creating a todo item for that.
       try {
         master.toDoQueue.put(new ProcessRegionClose(master, region,
-          offlineRegion, deleteRegion));
+          offlineRegion));
       } catch (InterruptedException e) {
         throw new RuntimeException(
-          "Putting into toDoQueue was interrupted.", e);
+            "Putting into toDoQueue was interrupted.", e);
       }
     } else {
       // we are reassigning the region eventually, so set it unassigned

@@ -543,10 +545,12 @@ class ServerManager implements HConstants {
     // update load information
     HServerLoad load = serversToLoad.remove(serverName);
     if (load != null) {
-      Set<String> servers = loadToServers.get(load);
-      if (servers != null) {
-        servers.remove(serverName);
-        loadToServers.put(load, servers);
+      synchronized (loadToServers) {
+        Set<String> servers = loadToServers.get(load);
+        if (servers != null) {
+          servers.remove(serverName);
+          loadToServers.put(load, servers);
+        }
       }
     }
   }

@@ -567,8 +571,8 @@ class ServerManager implements HConstants {
     synchronized (serversToLoad) {
       numServers = serversToLoad.size();
 
-      for (Map.Entry<String, HServerLoad> entry : serversToLoad.entrySet()) {
-        totalLoad += entry.getValue().getNumberOfRegions();
+      for (HServerLoad load : serversToLoad.values()) {
+        totalLoad += load.getNumberOfRegions();
       }
 
       averageLoad = Math.ceil((double)totalLoad / (double)numServers);

@@ -597,21 +601,27 @@ class ServerManager implements HConstants {
    * @return Read-only map of servers to serverinfo.
    */
   public Map<String, HServerInfo> getServersToServerInfo() {
-    return Collections.unmodifiableMap(serversToServerInfo);
+    synchronized (serversToServerInfo) {
+      return new HashMap<String, HServerInfo>(serversToServerInfo);
+    }
   }
 
   /**
    * @return Read-only map of servers to load.
    */
   public Map<String, HServerLoad> getServersToLoad() {
-    return Collections.unmodifiableMap(serversToLoad);
+    synchronized (serversToLoad) {
+      return new HashMap<String, HServerLoad>(serversToLoad);
+    }
   }
 
   /**
    * @return Read-only map of load to servers.
    */
   public Map<HServerLoad, Set<String>> getLoadToServers() {
-    return Collections.unmodifiableMap(loadToServers);
+    synchronized (loadToServers) {
+      return new HashMap<HServerLoad, Set<String>>(loadToServers);
+    }
   }
 
   /**

@@ -670,10 +680,12 @@ class ServerManager implements HConstants {
     String serverName = info.getServerAddress().toString();
     HServerLoad load = serversToLoad.remove(serverName);
     if (load != null) {
-      Set<String> servers = loadToServers.get(load);
-      if (servers != null) {
-        servers.remove(serverName);
-        loadToServers.put(load, servers);
+      synchronized (loadToServers) {
+        Set<String> servers = loadToServers.get(load);
+        if (servers != null) {
+          servers.remove(serverName);
+          loadToServers.put(load, servers);
+        }
       }
     }
     deadServers.add(server);

@@ -20,42 +20,44 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.util.HashSet;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.io.Text;
 
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
 /**
- * Instantiated to delete a table
- * Note that it extends ChangeTableState, which takes care of disabling
- * the table.
+ * Instantiated to delete a table. Table must be offline.
  */
-class TableDelete extends ChangeTableState {
+class TableDelete extends TableOperation {
 
   TableDelete(final HMaster master, final Text tableName) throws IOException {
-    super(master, tableName, false);
+    super(master, tableName);
   }
 
+  @Override
+  protected void processScanItem(
+      @SuppressWarnings("unused") String serverName,
+      @SuppressWarnings("unused") long startCode,
+      final HRegionInfo info) throws IOException {
+
+    if (isEnabled(info)) {
+      throw new TableNotDisabledException(tableName.toString());
+    }
+  }
+
   @Override
   protected void postProcessMeta(MetaRegion m, HRegionInterface server)
     throws IOException {
-    // For regions that are being served, mark them for deletion
-    for (HashSet<HRegionInfo> s: servedRegions.values()) {
-      for (HRegionInfo i: s) {
-        master.regionManager.markRegionForDeletion(i.getRegionName());
-      }
-    }
-
-    // Unserved regions we can delete now
     for (HRegionInfo i: unservedRegions) {
       // Delete the region
       try {
+        HRegion.removeRegionFromMETA(server, m.getRegionName(), i.getRegionName());
         HRegion.deleteRegion(this.master.fs, this.master.rootdir, i);
 
       } catch (IOException e) {

@@ -63,18 +65,9 @@ class TableDelete extends ChangeTableState {
           RemoteExceptionHandler.checkIOException(e));
       }
     }
-    super.postProcessMeta(m, server);
 
     // delete the table's folder from fs.
-    FileUtil.fullyDelete(master.fs,
-      new Path(master.rootdir, tableName.toString()));
-  }
-
-  @Override
-  protected void updateRegionInfo(BatchUpdate b,
-    @SuppressWarnings("unused") HRegionInfo info) {
-    for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
-      // Be sure to clean all cells
-      b.delete(ALL_META_COLUMNS[i]);
-    }
+    master.fs.delete(new Path(master.rootdir, tableName.toString()));
   }
 }

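Because TableDelete now extends TableOperation instead of ChangeTableState, deletion no longer disables the table as a side effect: processScanItem throws TableNotDisabledException if any region is still enabled, and the regions are removed from META and from disk synchronously rather than by marking them and hoping the asynchronous close path finishes the job. Client code must therefore disable the table first. A hedged usage sketch (HBaseAdmin API of this vintage, assumed from the admin.deleteTable call in the test changes below; the import location is an assumption):

    import org.apache.hadoop.hbase.HBaseAdmin;            // location assumed
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.io.Text;

    public class DeleteTableExample {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        Text table = new Text("mytable");
        admin.disableTable(table);  // take every region offline first...
        admin.deleteTable(table);   // ...else TableNotDisabledException
      }
    }
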
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.io.RowResult;

@@ -46,8 +47,7 @@ import org.apache.hadoop.hbase.util.Sleeper;
 abstract class TableOperation implements HConstants {
   static final Long ZERO_L = Long.valueOf(0L);
 
-  protected static final Log LOG =
-    LogFactory.getLog(TableOperation.class.getName());
+  protected static final Log LOG = LogFactory.getLog(TableOperation.class);
 
   protected Set<MetaRegion> metaRegions;
   protected Text tableName;

@@ -104,8 +104,8 @@ abstract class TableOperation implements HConstants {
         HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
         if (info == null) {
           emptyRows.add(values.getRow());
-          throw new IOException(COL_REGIONINFO + " not found on " +
-            values.getRow());
+          LOG.error(COL_REGIONINFO + " not found on " + values.getRow());
+          continue;
         }
         String serverName = Writables.cellToString(values.get(COL_SERVER));
         long startCode = Writables.cellToLong(values.get(COL_STARTCODE));

@@ -141,7 +141,7 @@ abstract class TableOperation implements HConstants {
     }
 
     if (!tableExists) {
-      throw new IOException(tableName + " does not exist");
+      throw new TableNotFoundException(tableName + " does not exist");
     }
 
     postProcessMeta(m, server);

@@ -575,8 +575,9 @@ class Memcache {
    * equal or older timestamp. If no keys, returns an empty List. Does not
    * return null.
    */
-  private List<HStoreKey> internalGetKeys(final SortedMap<HStoreKey, byte []> map,
-    final HStoreKey origin, final int versions) {
+  private List<HStoreKey> internalGetKeys(
+      final SortedMap<HStoreKey, byte []> map, final HStoreKey origin,
+      final int versions) {
 
     long now = System.currentTimeMillis();
     List<HStoreKey> result = new ArrayList<HStoreKey>();

@@ -681,6 +682,7 @@ class Memcache {
     }
   }
 
+  /** {@inheritDoc} */
   @Override
   public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
     throws IOException {

@@ -704,7 +706,12 @@ class Memcache {
     }
     key.setRow(this.currentRow);
     key.setVersion(this.timestamp);
-    getFull(key, isWildcardScanner()? null: this.columns, deletes, rowResults);
+    getFull(key, isWildcardScanner() ? null : this.columns, deletes,
+      rowResults);
+    for (Map.Entry<Text, Long> e: deletes.entrySet()) {
+      rowResults.put(e.getKey(),
+        new Cell(HLogEdit.deleteBytes.get(), e.getValue()));
+    }
     for (Map.Entry<Text, Cell> e: rowResults.entrySet()) {
       Text column = e.getKey();
       Cell c = e.getValue();

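For context on the Memcache scanner change: the deletes gathered by getFull are now folded back into the row's results as explicit tombstone cells, so the filtering loop that follows can see which columns were deleted instead of losing track of them. A reduced sketch of tombstone merging (plain strings stand in for Text/Cell, and TOMBSTONE is an illustrative stand-in for HLogEdit.deleteBytes):

    import java.util.HashMap;
    import java.util.Map;

    public class TombstoneMerge {
      static final String TOMBSTONE = "__deleted__";  // illustrative marker

      // Fold delete markers into the results so downstream filtering
      // can recognize and drop deleted columns explicitly.
      static void merge(Map<String, Long> deletes, Map<String, String> results) {
        for (Map.Entry<String, Long> e : deletes.entrySet()) {
          results.put(e.getKey(), TOMBSTONE + "@" + e.getValue());
        }
      }

      public static void main(String[] args) {
        Map<String, Long> deletes = new HashMap<String, Long>();
        deletes.put("anchor:link", 42L);
        Map<String, String> results = new HashMap<String, String>();
        results.put("contents:body", "hello");
        merge(deletes, results);
        System.out.println(results);
      }
    }
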
@@ -722,6 +729,7 @@ class Memcache {
     return results.size() > 0;
   }
 
+  /** {@inheritDoc} */
   public void close() {
     if (!scannerClosed) {
       scannerClosed = true;

@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
-import java.util.Iterator;
 import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -73,7 +72,6 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     basic();
     scanner();
     listTables();
-    cleanup();
   }
 
   private static final int FIRST_ROW = 1;

@@ -204,11 +202,4 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     assertTrue(families.contains(new Text(CONTENTS)));
     assertTrue(families.contains(new Text(ANCHOR)));
   }
-
-  private void cleanup() throws IOException {
-
-    // Delete the table we created
-
-    admin.deleteTable(desc.getName());
-  }
 }