HBASE-478 offlining of table does not run reliably

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@654193 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jim Kellerman 2008-05-07 17:56:06 +00:00
parent cfefc60bac
commit b3f4ddc1bc
13 changed files with 177 additions and 204 deletions

View File

@@ -33,6 +33,7 @@ Hbase Change Log
    HBASE-405 TIF and TOF use log4j directly rather than apache commons-logging
    HBASE-618 We always compact if 2 files, regardless of the compaction threshold setting
    HBASE-619 Fix 'logs' link in UI
+   HBASE-478 offlining of table does not run reliably
 IMPROVEMENTS
    HBASE-559 MR example job to count table rows

View File

@@ -357,10 +357,10 @@ abstract class BaseScanner extends Chore implements HConstants {
     // Skip region - if ...
     if(info.isOffline()                                  // offline
-        || regionManager.isClosing(info.getRegionName())     // queued for offline
-        || regionManager.isMarkedForDeletion(info.getRegionName())) { // queued for delete
+        || regionManager.isClosing(info.getRegionName())) {  // queued for offline
       regionManager.noLongerUnassigned(info);
-      regionManager.noLongerPending(info.getRegionName());
       return;
     }
     HServerInfo storedInfo = null;

View File

@ -71,8 +71,8 @@ class ChangeTableState extends TableOperation {
for (HRegionInfo i: unservedRegions) { for (HRegionInfo i: unservedRegions) {
if (i.isOffline() && i.isSplit()) { if (i.isOffline() && i.isSplit()) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Skipping region " + i.toString() + " because it is " + LOG.debug("Skipping region " + i.toString() +
"offline because it has been split"); " because it is offline because it has been split");
} }
continue; continue;
} }
@ -94,6 +94,7 @@ class ChangeTableState extends TableOperation {
if (online) { if (online) {
// Bring offline regions on-line // Bring offline regions on-line
master.regionManager.noLongerClosing(i.getRegionName());
if (!master.regionManager.isUnassigned(i)) { if (!master.regionManager.isUnassigned(i)) {
master.regionManager.setUnassigned(i); master.regionManager.setUnassigned(i);
} }
@ -120,22 +121,20 @@ class ChangeTableState extends TableOperation {
HashMap<Text, HRegionInfo> localKillList = HashMap<Text, HRegionInfo> localKillList =
new HashMap<Text, HRegionInfo>(); new HashMap<Text, HRegionInfo>();
Map<Text, HRegionInfo> killedRegions =
master.regionManager.getMarkedToClose(serverName);
if (killedRegions != null) {
localKillList.putAll(killedRegions);
}
for (HRegionInfo i: e.getValue()) { for (HRegionInfo i: e.getValue()) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("adding region " + i.getRegionName() + LOG.debug("adding region " + i.getRegionName() + " to kill list");
" to kill list");
} }
// this marks the regions to be closed // this marks the regions to be closed
localKillList.put(i.getRegionName(), i); localKillList.put(i.getRegionName(), i);
// this marks the regions to be offlined once they are closed // this marks the regions to be offlined once they are closed
master.regionManager.markRegionForOffline(i.getRegionName()); master.regionManager.markRegionForOffline(i.getRegionName());
} }
Map<Text, HRegionInfo> killedRegions =
master.regionManager.removeMarkedToClose(serverName);
if (killedRegions != null) {
localKillList.putAll(killedRegions);
}
if (localKillList.size() > 0) { if (localKillList.size() > 0) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("inserted local kill list into kill list for server " + LOG.debug("inserted local kill list into kill list for server " +

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.master;
import java.util.Map; import java.util.Map;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.InvalidColumnNameException;
import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
@@ -51,9 +52,8 @@ class ModifyColumn extends ColumnOperation {
       if (families.get(columnName) != null){
         families.put(columnName, descriptor);
         updateRegionInfo(server, m.getRegionName(), i);
-      }
-      else{ // otherwise, we have an error.
-        throw new IOException("Column family '" + columnName +
+      } else{ // otherwise, we have an error.
+        throw new InvalidColumnNameException("Column family '" + columnName +
           "' doesn't exist, so cannot be modified.");
       }
     }

View File

@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
@ -35,28 +34,25 @@ import org.apache.hadoop.hbase.HRegionInfo;
*/ */
class ProcessRegionClose extends ProcessRegionStatusChange { class ProcessRegionClose extends ProcessRegionStatusChange {
protected final boolean offlineRegion; protected final boolean offlineRegion;
protected final boolean deleteRegion;
/** /**
* @param master * @param master
* @param regionInfo Region to operate on * @param regionInfo Region to operate on
* @param offlineRegion if true, set the region to offline in meta * @param offlineRegion if true, set the region to offline in meta
* @param deleteRegion if true, delete the region row from meta and then
* delete the region files from disk. * delete the region files from disk.
*/ */
public ProcessRegionClose(HMaster master, HRegionInfo regionInfo, public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
boolean offlineRegion, boolean deleteRegion) { boolean offlineRegion) {
super(master, regionInfo); super(master, regionInfo);
this.offlineRegion = offlineRegion; this.offlineRegion = offlineRegion;
this.deleteRegion = deleteRegion;
} }
/** {@inheritDoc} */ /** {@inheritDoc} */
@Override @Override
public String toString() { public String toString() {
return "ProcessRegionClose of " + this.regionInfo.getRegionName() + return "ProcessRegionClose of " + this.regionInfo.getRegionName() +
", " + this.offlineRegion + ", " + this.deleteRegion; ", " + this.offlineRegion;
} }
@Override @Override
@ -76,10 +72,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
return true; return true;
} }
if (deleteRegion) { if (offlineRegion) {
HRegion.removeRegionFromMETA(server, metaRegionName,
regionInfo.getRegionName());
} else if (offlineRegion) {
// offline the region in meta and then note that we've offlined the // offline the region in meta and then note that we've offlined the
// region. // region.
HRegion.offlineRegionInMETA(server, metaRegionName, HRegion.offlineRegionInMETA(server, metaRegionName,
@ -90,17 +83,6 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
} }
}.doWithRetries(); }.doWithRetries();
// now that meta is updated, if we need to delete the region's files, now's
// the time.
if (deleteRegion) {
try {
HRegion.deleteRegion(master.fs, master.rootdir, regionInfo);
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
LOG.error("failed delete region " + regionInfo.getRegionName(), e);
throw e;
}
}
return result == null ? true : result; return result == null ? true : result;
} }
} }

View File

@ -52,13 +52,11 @@ class ProcessServerShutdown extends RegionServerOperation {
private boolean rootRescanned; private boolean rootRescanned;
private class ToDoEntry { private class ToDoEntry {
boolean deleteRegion;
boolean regionOffline; boolean regionOffline;
Text row; Text row;
HRegionInfo info; HRegionInfo info;
ToDoEntry(Text row, HRegionInfo info) { ToDoEntry(Text row, HRegionInfo info) {
this.deleteRegion = false;
this.regionOffline = false; this.regionOffline = false;
this.row = row; this.row = row;
this.info = info; this.info = info;
@ -154,22 +152,14 @@ class ProcessServerShutdown extends RegionServerOperation {
if (master.regionManager.isMarkedToClose(deadServerName, info.getRegionName())) { if (master.regionManager.isMarkedToClose(deadServerName, info.getRegionName())) {
master.regionManager.noLongerMarkedToClose(deadServerName, info.getRegionName()); master.regionManager.noLongerMarkedToClose(deadServerName, info.getRegionName());
master.regionManager.noLongerUnassigned(info); master.regionManager.noLongerUnassigned(info);
if (master.regionManager.isMarkedForDeletion(info.getRegionName())) { // Mark region offline
// Delete this region todo.regionOffline = true;
master.regionManager.regionDeleted(info.getRegionName());
todo.deleteRegion = true;
} else {
// Mark region offline
todo.regionOffline = true;
}
} else { } else {
// Get region reassigned // Get region reassigned
regions.add(info); regions.add(info);
// If it was pending, remove.
// Otherwise will obstruct its getting reassigned.
master.regionManager.noLongerPending(info.getRegionName());
} }
// If it was pending, remove.
master.regionManager.noLongerPending(info.getRegionName());
} }
} finally { } finally {
if(scannerId != -1L) { if(scannerId != -1L) {
@ -192,9 +182,7 @@ class ProcessServerShutdown extends RegionServerOperation {
} }
// Update server in root/meta entries // Update server in root/meta entries
for (ToDoEntry e: toDoList) { for (ToDoEntry e: toDoList) {
if (e.deleteRegion) { if (e.regionOffline) {
HRegion.removeRegionFromMETA(server, regionName, e.row);
} else if (e.regionOffline) {
HRegion.offlineRegionInMETA(server, regionName, e.info); HRegion.offlineRegionInMETA(server, regionName, e.info);
} }
} }
@ -314,6 +302,7 @@ class ProcessServerShutdown extends RegionServerOperation {
r.getRegionName() + " on " + r.getServer()); r.getRegionName() + " on " + r.getServer());
} }
} }
master.regionManager.allRegionsClosed(deadServerName);
master.serverManager.removeDeadServer(deadServerName); master.serverManager.removeDeadServer(deadServerName);
return true; return true;
} }

View File

@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.util.Writables;
* Class to manage assigning regions to servers, state of root and meta, etc. * Class to manage assigning regions to servers, state of root and meta, etc.
*/ */
class RegionManager implements HConstants { class RegionManager implements HConstants {
protected static final Log LOG = LogFactory.getLog(RegionManager.class.getName()); protected static final Log LOG = LogFactory.getLog(RegionManager.class);
private volatile AtomicReference<HServerAddress> rootRegionLocation = private volatile AtomicReference<HServerAddress> rootRegionLocation =
new AtomicReference<HServerAddress>(null); new AtomicReference<HServerAddress>(null);
@ -103,13 +103,6 @@ class RegionManager implements HConstants {
private final Set<Text> closingRegions = private final Set<Text> closingRegions =
Collections.synchronizedSet(new HashSet<Text>()); Collections.synchronizedSet(new HashSet<Text>());
/**
* 'regionsToDelete' contains regions that need to be deleted, but cannot be
* until the region server closes it
*/
private final Set<Text> regionsToDelete =
Collections.synchronizedSet(new HashSet<Text>());
/** /**
* Set of regions that, once closed, should be marked as offline so that they * Set of regions that, once closed, should be marked as offline so that they
* are not reassigned. * are not reassigned.
@ -119,7 +112,7 @@ class RegionManager implements HConstants {
// How many regions to assign a server at a time. // How many regions to assign a server at a time.
private final int maxAssignInOneGo; private final int maxAssignInOneGo;
private HMaster master; private final HMaster master;
RegionManager(HMaster master) { RegionManager(HMaster master) {
this.master = master; this.master = master;
@ -199,8 +192,8 @@ class RegionManager implements HConstants {
} else { } else {
// otherwise, give this server a few regions taking into account the // otherwise, give this server a few regions taking into account the
// load of all the other servers. // load of all the other servers.
assignRegionsToMultipleServers(thisServersLoad, regionsToAssign, assignRegionsToMultipleServers(thisServersLoad, regionsToAssign,
serverName, returnMsgs); serverName, returnMsgs);
} }
} }
} }
@ -320,19 +313,21 @@ class RegionManager implements HConstants {
// Look over the set of regions that aren't currently assigned to // Look over the set of regions that aren't currently assigned to
// determine which we should assign to this server. // determine which we should assign to this server.
for (Map.Entry<HRegionInfo, Long> e: unassignedRegions.entrySet()) { synchronized (unassignedRegions) { //must synchronize when iterating
HRegionInfo i = e.getKey(); for (Map.Entry<HRegionInfo, Long> e: unassignedRegions.entrySet()) {
if (numberOfMetaRegions.get() != onlineMetaRegions.size() && HRegionInfo i = e.getKey();
!i.isMetaRegion()) { if (numberOfMetaRegions.get() != onlineMetaRegions.size() &&
// Can't assign user regions until all meta regions have been assigned !i.isMetaRegion()) {
// and are on-line // Can't assign user regions until all meta regions have been assigned
continue; // and are on-line
} continue;
// If the last attempt to open this region was pretty recent, then we }
// don't want to try and assign it. // If the last attempt to open this region was pretty recent, then we
long diff = now - e.getValue().longValue(); // don't want to try and assign it.
if (diff > master.maxRegionOpenTime) { long diff = now - e.getValue().longValue();
regionsToAssign.add(e.getKey()); if (diff > master.maxRegionOpenTime) {
regionsToAssign.add(e.getKey());
}
} }
} }
return regionsToAssign; return regionsToAssign;
@ -434,7 +429,9 @@ class RegionManager implements HConstants {
* @return Read-only map of online regions. * @return Read-only map of online regions.
*/ */
public Map<Text, MetaRegion> getOnlineMetaRegions() { public Map<Text, MetaRegion> getOnlineMetaRegions() {
return Collections.unmodifiableSortedMap(onlineMetaRegions); synchronized (onlineMetaRegions) {
return new TreeMap<Text, MetaRegion>(onlineMetaRegions);
}
} }
/** /**
@ -695,18 +692,27 @@ class RegionManager implements HConstants {
* @param map map of region names to region infos of regions to close * @param map map of region names to region infos of regions to close
*/ */
public void markToCloseBulk(String serverName, public void markToCloseBulk(String serverName,
Map<Text, HRegionInfo> map) { Map<Text, HRegionInfo> map) {
regionsToClose.put(serverName, map); synchronized (regionsToClose) {
Map<Text, HRegionInfo> regions = regionsToClose.get(serverName);
if (regions != null) {
regions.putAll(map);
} else {
regions = map;
}
regionsToClose.put(serverName, regions);
}
} }
/** /**
* Get a map of region names to region infos waiting to be offlined for a * Remove the map of region names to region infos waiting to be offlined for a
* given server * given server
*
* @param serverName * @param serverName
* @return map of region names to region infos to close * @return map of region names to region infos to close
*/ */
public Map<Text, HRegionInfo> getMarkedToClose(String serverName) { public Map<Text, HRegionInfo> removeMarkedToClose(String serverName) {
return regionsToClose.get(serverName); return regionsToClose.remove(serverName);
} }
/** /**
@ -737,6 +743,15 @@ class RegionManager implements HConstants {
} }
} }
/**
* Called when all regions for a particular server have been closed
*
* @param serverName
*/
public void allRegionsClosed(String serverName) {
regionsToClose.remove(serverName);
}
/** /**
* Check if a region is closing * Check if a region is closing
* @param regionName * @param regionName
@ -771,22 +786,6 @@ class RegionManager implements HConstants {
metaScannerThread.addMetaRegionToScan(m); metaScannerThread.addMetaRegionToScan(m);
} }
/**
* Mark a region as to be deleted
* @param regionName
*/
public void markRegionForDeletion(Text regionName) {
regionsToDelete.add(regionName);
}
/**
* Note that a region to delete has been deleted
* @param regionName
*/
public void regionDeleted(Text regionName) {
regionsToDelete.remove(regionName);
}
/** /**
* Note that a region should be offlined as soon as its closed. * Note that a region should be offlined as soon as its closed.
* @param regionName * @param regionName
@ -812,15 +811,6 @@ class RegionManager implements HConstants {
regionsToOffline.remove(regionName); regionsToOffline.remove(regionName);
} }
/**
* Check if a region is marked for deletion
* @param regionName
* @return true if marked for deletion, false otherwise
*/
public boolean isMarkedForDeletion(Text regionName) {
return regionsToDelete.contains(regionName);
}
/** /**
* Check if the initial root scan has been completed. * Check if the initial root scan has been completed.
* @return true if scan completed, false otherwise * @return true if scan completed, false otherwise

View File

@ -28,7 +28,10 @@ import java.util.concurrent.Callable;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hbase.InvalidColumnNameException;
import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.HRegionInterface;
/** /**
@ -58,6 +61,11 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
this.server = master.connection.getHRegionConnection(m.getServer()); this.server = master.connection.getHRegionConnection(m.getServer());
return this.call(); return this.call();
} catch (IOException e) { } catch (IOException e) {
if (e instanceof TableNotFoundException ||
e instanceof TableNotDisabledException ||
e instanceof InvalidColumnNameException) {
throw e;
}
if (e instanceof RemoteException) { if (e instanceof RemoteException) {
e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
} }

View File

@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.master; package org.apache.hadoop.hbase.master;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.SortedMap; import java.util.SortedMap;
import java.util.TreeMap; import java.util.TreeMap;
@ -101,10 +102,12 @@ class ServerManager implements HConstants {
if (load != null) { if (load != null) {
// The startup message was from a known server. // The startup message was from a known server.
// Remove stale information about the server's load. // Remove stale information about the server's load.
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
if (servers != null) { Set<String> servers = loadToServers.get(load);
servers.remove(s); if (servers != null) {
loadToServers.put(load, servers); servers.remove(s);
loadToServers.put(load, servers);
}
} }
} }
@ -124,12 +127,14 @@ class ServerManager implements HConstants {
serverInfo.setLoad(load); serverInfo.setLoad(load);
serversToServerInfo.put(s, serverInfo); serversToServerInfo.put(s, serverInfo);
serversToLoad.put(s, load); serversToLoad.put(s, load);
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
if (servers == null) { Set<String> servers = loadToServers.get(load);
servers = new HashSet<String>(); if (servers == null) {
servers = new HashSet<String>();
}
servers.add(s);
loadToServers.put(load, servers);
} }
servers.add(s);
loadToServers.put(load, servers);
} }
/** /**
@ -247,7 +252,10 @@ class ServerManager implements HConstants {
master.regionManager.offlineMetaRegion(info.getStartKey()); master.regionManager.offlineMetaRegion(info.getStartKey());
} }
master.regionManager.setUnassigned(info); if (!master.regionManager.isMarkedToClose(
serverName, info.getRegionName())) {
master.regionManager.setUnassigned(info);
}
} }
} }
} }
@ -277,24 +285,27 @@ class ServerManager implements HConstants {
if (load != null && !load.equals(serverInfo.getLoad())) { if (load != null && !load.equals(serverInfo.getLoad())) {
// We have previous information about the load on this server // We have previous information about the load on this server
// and the load on this server has changed // and the load on this server has changed
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
Set<String> servers = loadToServers.get(load);
// Note that servers should never be null because loadToServers // Note that servers should never be null because loadToServers
// and serversToLoad are manipulated in pairs // and serversToLoad are manipulated in pairs
servers.remove(serverName); servers.remove(serverName);
loadToServers.put(load, servers); loadToServers.put(load, servers);
}
} }
// Set the current load information // Set the current load information
load = serverInfo.getLoad(); load = serverInfo.getLoad();
serversToLoad.put(serverName, load); serversToLoad.put(serverName, load);
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
if (servers == null) { Set<String> servers = loadToServers.get(load);
servers = new HashSet<String>(); if (servers == null) {
servers = new HashSet<String>();
}
servers.add(serverName);
loadToServers.put(load, servers);
} }
servers.add(serverName);
loadToServers.put(load, servers);
// Next, process messages for this server // Next, process messages for this server
return processMsgs(serverName, serverInfo, mostLoadedRegions, msgs); return processMsgs(serverName, serverInfo, mostLoadedRegions, msgs);
} }
@ -310,7 +321,7 @@ class ServerManager implements HConstants {
throws IOException { throws IOException {
ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>(); ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>();
Map<Text, HRegionInfo> regionsToKill = Map<Text, HRegionInfo> regionsToKill =
master.regionManager.getMarkedToClose(serverName); master.regionManager.removeMarkedToClose(serverName);
// Get reports on what the RegionServer did. // Get reports on what the RegionServer did.
for (int i = 0; i < incomingMsgs.length; i++) { for (int i = 0; i < incomingMsgs.length; i++) {
@ -351,7 +362,6 @@ class ServerManager implements HConstants {
returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i)); returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i));
// Transition the region from toClose to closing state // Transition the region from toClose to closing state
master.regionManager.setClosing(i.getRegionName()); master.regionManager.setClosing(i.getRegionName());
master.regionManager.noLongerMarkedToClose(serverName, i.getRegionName());
} }
} }
@ -475,7 +485,6 @@ class ServerManager implements HConstants {
} else { } else {
boolean reassignRegion = !region.isOffline(); boolean reassignRegion = !region.isOffline();
boolean deleteRegion = false;
boolean offlineRegion = false; boolean offlineRegion = false;
// either this region is being closed because it was marked to close, or // either this region is being closed because it was marked to close, or
@ -483,19 +492,12 @@ class ServerManager implements HConstants {
// at least try to remove it from the closing list. // at least try to remove it from the closing list.
master.regionManager.noLongerClosing(region.getRegionName()); master.regionManager.noLongerClosing(region.getRegionName());
// if the region is marked to be offlined, we don't want to reassign // if the region is marked to be offlined, we don't want to reassign it.
// it.
if (master.regionManager.isMarkedForOffline(region.getRegionName())) { if (master.regionManager.isMarkedForOffline(region.getRegionName())) {
reassignRegion = false; reassignRegion = false;
offlineRegion = true; offlineRegion = true;
} }
if (master.regionManager.isMarkedForDeletion(region.getRegionName())) {
master.regionManager.regionDeleted(region.getRegionName());
reassignRegion = false;
deleteRegion = true;
}
if (region.isMetaTable()) { if (region.isMetaTable()) {
// Region is part of the meta table. Remove it from onlineMetaRegions // Region is part of the meta table. Remove it from onlineMetaRegions
master.regionManager.offlineMetaRegion(region.getStartKey()); master.regionManager.offlineMetaRegion(region.getStartKey());
@ -513,10 +515,10 @@ class ServerManager implements HConstants {
// operations asynchronously, so we'll creating a todo item for that. // operations asynchronously, so we'll creating a todo item for that.
try { try {
master.toDoQueue.put(new ProcessRegionClose(master, region, master.toDoQueue.put(new ProcessRegionClose(master, region,
offlineRegion, deleteRegion)); offlineRegion));
} catch (InterruptedException e) { } catch (InterruptedException e) {
throw new RuntimeException( throw new RuntimeException(
"Putting into toDoQueue was interrupted.", e); "Putting into toDoQueue was interrupted.", e);
} }
} else { } else {
// we are reassigning the region eventually, so set it unassigned // we are reassigning the region eventually, so set it unassigned
@ -543,10 +545,12 @@ class ServerManager implements HConstants {
// update load information // update load information
HServerLoad load = serversToLoad.remove(serverName); HServerLoad load = serversToLoad.remove(serverName);
if (load != null) { if (load != null) {
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
if (servers != null) { Set<String> servers = loadToServers.get(load);
servers.remove(serverName); if (servers != null) {
loadToServers.put(load, servers); servers.remove(serverName);
loadToServers.put(load, servers);
}
} }
} }
} }
@ -567,8 +571,8 @@ class ServerManager implements HConstants {
synchronized (serversToLoad) { synchronized (serversToLoad) {
numServers = serversToLoad.size(); numServers = serversToLoad.size();
for (Map.Entry<String, HServerLoad> entry : serversToLoad.entrySet()) { for (HServerLoad load : serversToLoad.values()) {
totalLoad += entry.getValue().getNumberOfRegions(); totalLoad += load.getNumberOfRegions();
} }
averageLoad = Math.ceil((double)totalLoad / (double)numServers); averageLoad = Math.ceil((double)totalLoad / (double)numServers);
@ -597,21 +601,27 @@ class ServerManager implements HConstants {
* @return Read-only map of servers to serverinfo. * @return Read-only map of servers to serverinfo.
*/ */
public Map<String, HServerInfo> getServersToServerInfo() { public Map<String, HServerInfo> getServersToServerInfo() {
return Collections.unmodifiableMap(serversToServerInfo); synchronized (serversToServerInfo) {
return new HashMap<String, HServerInfo>(serversToServerInfo);
}
} }
/** /**
* @return Read-only map of servers to load. * @return Read-only map of servers to load.
*/ */
public Map<String, HServerLoad> getServersToLoad() { public Map<String, HServerLoad> getServersToLoad() {
return Collections.unmodifiableMap(serversToLoad); synchronized (serversToLoad) {
return new HashMap<String, HServerLoad>(serversToLoad);
}
} }
/** /**
* @return Read-only map of load to servers. * @return Read-only map of load to servers.
*/ */
public Map<HServerLoad, Set<String>> getLoadToServers() { public Map<HServerLoad, Set<String>> getLoadToServers() {
return Collections.unmodifiableMap(loadToServers); synchronized (loadToServers) {
return new HashMap<HServerLoad, Set<String>>(loadToServers);
}
} }
/** /**
@ -670,10 +680,12 @@ class ServerManager implements HConstants {
String serverName = info.getServerAddress().toString(); String serverName = info.getServerAddress().toString();
HServerLoad load = serversToLoad.remove(serverName); HServerLoad load = serversToLoad.remove(serverName);
if (load != null) { if (load != null) {
Set<String> servers = loadToServers.get(load); synchronized (loadToServers) {
if (servers != null) { Set<String> servers = loadToServers.get(load);
servers.remove(serverName); if (servers != null) {
loadToServers.put(load, servers); servers.remove(serverName);
loadToServers.put(load, servers);
}
} }
} }
deadServers.add(server); deadServers.add(server);

View File

@ -20,42 +20,44 @@
package org.apache.hadoop.hbase.master; package org.apache.hadoop.hbase.master;
import java.io.IOException; import java.io.IOException;
import java.util.HashSet;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.io.BatchUpdate; import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.HRegionInterface;
/** /**
* Instantiated to delete a table * Instantiated to delete a table. Table must be offline.
* Note that it extends ChangeTableState, which takes care of disabling
* the table.
*/ */
class TableDelete extends ChangeTableState { class TableDelete extends TableOperation {
TableDelete(final HMaster master, final Text tableName) throws IOException { TableDelete(final HMaster master, final Text tableName) throws IOException {
super(master, tableName, false); super(master, tableName);
}
@Override
protected void processScanItem(
@SuppressWarnings("unused") String serverName,
@SuppressWarnings("unused") long startCode,
final HRegionInfo info) throws IOException {
if (isEnabled(info)) {
throw new TableNotDisabledException(tableName.toString());
}
} }
@Override @Override
protected void postProcessMeta(MetaRegion m, HRegionInterface server) protected void postProcessMeta(MetaRegion m, HRegionInterface server)
throws IOException { throws IOException {
// For regions that are being served, mark them for deletion
for (HashSet<HRegionInfo> s: servedRegions.values()) {
for (HRegionInfo i: s) {
master.regionManager.markRegionForDeletion(i.getRegionName());
}
}
// Unserved regions we can delete now
for (HRegionInfo i: unservedRegions) { for (HRegionInfo i: unservedRegions) {
// Delete the region // Delete the region
try { try {
HRegion.removeRegionFromMETA(server, m.getRegionName(), i.getRegionName());
HRegion.deleteRegion(this.master.fs, this.master.rootdir, i); HRegion.deleteRegion(this.master.fs, this.master.rootdir, i);
} catch (IOException e) { } catch (IOException e) {
@ -63,18 +65,9 @@ class TableDelete extends ChangeTableState {
RemoteExceptionHandler.checkIOException(e)); RemoteExceptionHandler.checkIOException(e));
} }
} }
super.postProcessMeta(m, server);
// delete the table's folder from fs. // delete the table's folder from fs.
master.fs.delete(new Path(master.rootdir, tableName.toString())); FileUtil.fullyDelete(master.fs,
} new Path(master.rootdir, tableName.toString()));
@Override
protected void updateRegionInfo(BatchUpdate b,
@SuppressWarnings("unused") HRegionInfo info) {
for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
// Be sure to clean all cells
b.delete(ALL_META_COLUMNS[i]);
}
} }
} }

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.io.RowResult; import org.apache.hadoop.hbase.io.RowResult;
@ -46,8 +47,7 @@ import org.apache.hadoop.hbase.util.Sleeper;
abstract class TableOperation implements HConstants { abstract class TableOperation implements HConstants {
static final Long ZERO_L = Long.valueOf(0L); static final Long ZERO_L = Long.valueOf(0L);
protected static final Log LOG = protected static final Log LOG = LogFactory.getLog(TableOperation.class);
LogFactory.getLog(TableOperation.class.getName());
protected Set<MetaRegion> metaRegions; protected Set<MetaRegion> metaRegions;
protected Text tableName; protected Text tableName;
@ -104,8 +104,8 @@ abstract class TableOperation implements HConstants {
HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values); HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
if (info == null) { if (info == null) {
emptyRows.add(values.getRow()); emptyRows.add(values.getRow());
throw new IOException(COL_REGIONINFO + " not found on " + LOG.error(COL_REGIONINFO + " not found on " + values.getRow());
values.getRow()); continue;
} }
String serverName = Writables.cellToString(values.get(COL_SERVER)); String serverName = Writables.cellToString(values.get(COL_SERVER));
long startCode = Writables.cellToLong(values.get(COL_STARTCODE)); long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
@ -141,7 +141,7 @@ abstract class TableOperation implements HConstants {
} }
if (!tableExists) { if (!tableExists) {
throw new IOException(tableName + " does not exist"); throw new TableNotFoundException(tableName + " does not exist");
} }
postProcessMeta(m, server); postProcessMeta(m, server);

View File

@ -575,8 +575,9 @@ class Memcache {
* equal or older timestamp. If no keys, returns an empty List. Does not * equal or older timestamp. If no keys, returns an empty List. Does not
* return null. * return null.
*/ */
private List<HStoreKey> internalGetKeys(final SortedMap<HStoreKey, byte []> map, private List<HStoreKey> internalGetKeys(
final HStoreKey origin, final int versions) { final SortedMap<HStoreKey, byte []> map, final HStoreKey origin,
final int versions) {
long now = System.currentTimeMillis(); long now = System.currentTimeMillis();
List<HStoreKey> result = new ArrayList<HStoreKey>(); List<HStoreKey> result = new ArrayList<HStoreKey>();
@ -681,6 +682,7 @@ class Memcache {
} }
} }
/** {@inheritDoc} */
@Override @Override
public boolean next(HStoreKey key, SortedMap<Text, byte []> results) public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
throws IOException { throws IOException {
@ -704,7 +706,12 @@ class Memcache {
} }
key.setRow(this.currentRow); key.setRow(this.currentRow);
key.setVersion(this.timestamp); key.setVersion(this.timestamp);
getFull(key, isWildcardScanner()? null: this.columns, deletes, rowResults); getFull(key, isWildcardScanner() ? null : this.columns, deletes,
rowResults);
for (Map.Entry<Text, Long> e: deletes.entrySet()) {
rowResults.put(e.getKey(),
new Cell(HLogEdit.deleteBytes.get(), e.getValue()));
}
for (Map.Entry<Text, Cell> e: rowResults.entrySet()) { for (Map.Entry<Text, Cell> e: rowResults.entrySet()) {
Text column = e.getKey(); Text column = e.getKey();
Cell c = e.getValue(); Cell c = e.getValue();
@ -722,6 +729,7 @@ class Memcache {
return results.size() > 0; return results.size() > 0;
} }
/** {@inheritDoc} */
public void close() { public void close() {
if (!scannerClosed) { if (!scannerClosed) {
scannerClosed = true; scannerClosed = true;

View File

@ -22,7 +22,6 @@ package org.apache.hadoop.hbase;
import java.io.IOException; import java.io.IOException;
import java.util.Iterator; import java.util.Iterator;
import java.util.Set; import java.util.Set;
import java.util.TreeMap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -73,7 +72,6 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
basic(); basic();
scanner(); scanner();
listTables(); listTables();
cleanup();
} }
private static final int FIRST_ROW = 1; private static final int FIRST_ROW = 1;
@ -204,11 +202,4 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
assertTrue(families.contains(new Text(CONTENTS))); assertTrue(families.contains(new Text(CONTENTS)));
assertTrue(families.contains(new Text(ANCHOR))); assertTrue(families.contains(new Text(ANCHOR)));
} }
private void cleanup() throws IOException {
// Delete the table we created
admin.deleteTable(desc.getName());
}
} }