HBASE-668 HBASE-533 broke build

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@663764 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-06-05 22:22:13 +00:00
parent 1a7c2fc882
commit be8ef48869
13 changed files with 163 additions and 104 deletions


@@ -40,6 +40,7 @@ Hbase Change Log
               to enable/disable table
    HBASE-654 API HTable.getMetadata().addFamily shouldn't be exposed to user
    HBASE-666 UnmodifyableHRegionInfo gives the wrong encoded name
+   HBASE-668 HBASE-533 broke build
 IMPROVEMENTS
    HBASE-559 MR example job to count table rows


@@ -33,15 +33,18 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
  * The Region Historian task is to keep track of every modification a region
- * has to go trought. Public methods are used to update the information in the
- * .META. table and to retreive it.
+ * has to go through. Public methods are used to update the information in the
+ * <code>.META.</code> table and to retrieve it. This is a Singleton. By
+ * default, the Historian is offline; it will not log. Its enabled in the
+ * regionserver and master down in their guts after there's some certainty the
+ * .META. has been deployed.
  */
 public class RegionHistorian implements HConstants {
-  private static final Log LOG = LogFactory.getLog(RegionHistorian.class);
+  static final Log LOG = LogFactory.getLog(RegionHistorian.class);
   private HTable metaTable;
   private GregorianCalendar cal = new GregorianCalendar();
@@ -69,23 +72,20 @@ public class RegionHistorian implements HConstants {
   }
   /**
-   * Default constructor. Initializes reference to .META. table
-   *
+   * Default constructor. Initializes reference to .META. table. Inaccessible.
+   * Use {@link #getInstance(HBaseConfiguration)} to obtain the Singleton
+   * instance of this class.
    */
   private RegionHistorian() {
-    HBaseConfiguration conf = new HBaseConfiguration();
-    try {
-      metaTable = new HTable(conf, META_TABLE_NAME);
-      LOG.debug("Region historian is ready.");
-    } catch (IOException ioe) {
-      LOG.warn("Unable to create RegionHistorian", ioe);
-    }
+    super();
   }
   /**
-   * Singleton method
-   *
+   * Get the RegionHistorian Singleton instance.
+   * @param c Configuration to use. Used to create an {@link HTable} homed
+   * on <code>.META.</code>. The HTable instance is lazily instantiated to
+   * allow for the getting and storing aside of an Historian instance even
+   * in the case where <code>.META.</code> has not yet deployed.
    * @return The region historian
    */
   public static RegionHistorian getInstance() {
@@ -98,15 +98,16 @@ public class RegionHistorian implements HConstants {
   /**
    * Returns, for a given region name, an ordered list by timestamp of all
    * values in the historian column of the .META. table.
-   *
    * @param regionName
    *          Region name as a string
-   * @return List of RegionHistoryInformation
+   * @return List of RegionHistoryInformation or null if we're offline.
    */
-  public static List<RegionHistoryInformation> getRegionHistory(
-      String regionName) {
-    getInstance();
-    List<RegionHistoryInformation> informations = new ArrayList<RegionHistoryInformation>();
+  public List<RegionHistoryInformation> getRegionHistory(String regionName) {
+    if (!isOnline()) {
+      return null;
+    }
+    List<RegionHistoryInformation> informations =
+      new ArrayList<RegionHistoryInformation>();
     try {
       /*
        * TODO REGION_HISTORIAN_KEYS is used because there is no other for the
@@ -115,7 +116,7 @@ public class RegionHistorian implements HConstants {
        */
       for (HistorianColumnKey keyEnu : HistorianColumnKey.values()) {
         byte[] columnKey = keyEnu.key;
-        Cell[] cells = historian.metaTable.get(Bytes.toBytes(regionName),
+        Cell[] cells = this.metaTable.get(Bytes.toBytes(regionName),
           columnKey, ALL_VERSIONS);
         if (cells != null) {
           for (Cell cell : cells) {
@@ -134,33 +135,27 @@ public class RegionHistorian implements HConstants {
   /**
    * Method to add a creation event to the row in the .META table
-   *
    * @param info
    */
-  public static void addRegionAssignment(HRegionInfo info, String serverName) {
+  public void addRegionAssignment(HRegionInfo info, String serverName) {
     add(HistorianColumnKey.REGION_ASSIGNMENT.key, "Region assigned to server "
       + serverName, info);
   }
   /**
    * Method to add a creation event to the row in the .META table
-   *
    * @param info
    */
-  public static void addRegionCreation(HRegionInfo info) {
+  public void addRegionCreation(HRegionInfo info) {
     add(HistorianColumnKey.REGION_CREATION.key, "Region creation", info);
   }
   /**
    * Method to add a opening event to the row in the .META table
-   *
    * @param info
    * @param address
    */
-  public static void addRegionOpen(HRegionInfo info, HServerAddress address) {
+  public void addRegionOpen(HRegionInfo info, HServerAddress address) {
     add(HistorianColumnKey.REGION_OPEN.key, "Region opened on server : "
       + address.getHostname(), info);
   }
@@ -172,9 +167,8 @@ public class RegionHistorian implements HConstants {
    * @param newInfo1
    * @param newInfo2
    */
-  public static void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1,
+  public void addRegionSplit(HRegionInfo oldInfo, HRegionInfo newInfo1,
     HRegionInfo newInfo2) {
     HRegionInfo[] infos = new HRegionInfo[] { newInfo1, newInfo2 };
     for (HRegionInfo info : infos) {
       add(HistorianColumnKey.REGION_SPLIT.key, "Region split from : "
@@ -184,10 +178,9 @@ public class RegionHistorian implements HConstants {
   /**
    * Method to add a compaction event to the row in the .META table
-   *
    * @param info
    */
-  public static void addRegionCompaction(HRegionInfo info, String timeTaken) {
+  public void addRegionCompaction(HRegionInfo info, String timeTaken) {
     if (LOG.isDebugEnabled()) {
       add(HistorianColumnKey.REGION_COMPACTION.key,
         "Region compaction completed in " + timeTaken, info);
@@ -196,10 +189,9 @@ public class RegionHistorian implements HConstants {
   /**
    * Method to add a flush event to the row in the .META table
-   *
    * @param info
    */
-  public static void addRegionFlush(HRegionInfo info, String timeTaken) {
+  public void addRegionFlush(HRegionInfo info, String timeTaken) {
     if (LOG.isDebugEnabled()) {
       add(HistorianColumnKey.REGION_FLUSH.key, "Region flush completed in "
         + timeTaken, info);
@@ -212,7 +204,8 @@ public class RegionHistorian implements HConstants {
    * @param text
    * @param info
    */
-  private static void add(byte[] column, String text, HRegionInfo info) {
+  private void add(byte[] column,
+      String text, HRegionInfo info) {
     add(column, text, info, LATEST_TIMESTAMP);
   }
@@ -223,14 +216,18 @@ public class RegionHistorian implements HConstants {
    * @param info
    * @param timestamp
    */
-  private static void add(byte[] column, String text, HRegionInfo info, long timestamp) {
+  private void add(byte[] column,
+      String text, HRegionInfo info, long timestamp) {
+    if (!isOnline()) {
+      // Its a noop
+      return;
+    }
     if (!info.isMetaRegion()) {
-      getInstance();
       BatchUpdate batch = new BatchUpdate(info.getRegionName());
       batch.setTimestamp(timestamp);
       batch.put(column, Bytes.toBytes(text));
       try {
-        historian.metaTable.commit(batch);
+        this.metaTable.commit(batch);
       } catch (IOException ioe) {
         LOG.warn("Unable to '" + text + "'", ioe);
       }
@@ -277,15 +274,38 @@ public class RegionHistorian implements HConstants {
     }
     /**
-     * Returns the value of the timestamp processed
-     * with the date formater.
-     * @return
+     * @return The value of the timestamp processed with the date formater.
      */
     public String getTimestampAsString() {
       cal.setTimeInMillis(timestamp);
       return dateFormat.format(cal.getTime());
     }
   }
-}
+  /**
+   * @return True if the historian is online. When offline, will not add
+   * updates to the .META. table.
+   */
+  public boolean isOnline() {
+    return this.metaTable != null;
+  }
+  /**
+   * @param c Online the historian. Invoke after cluster has spun up.
+   */
+  public void online(final HBaseConfiguration c) {
+    try {
+      this.metaTable = new HTable(c, META_TABLE_NAME);
+    } catch (IOException ioe) {
+      LOG.error("Unable to create RegionHistorian", ioe);
+    }
+  }
+  /**
+   * Offlines the historian.
+   * @see #online(HBaseConfiguration)
+   */
+  public void offline() {
+    this.metaTable = null;
+  }
+}
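
Taken together, the hunks above turn RegionHistorian into an offline-by-default Singleton: getInstance() always succeeds, the add* calls are no-ops until online(HBaseConfiguration) is invoked once .META. is known to be deployed, and offline() drops the .META. handle again at shutdown. The following minimal sketch illustrates that lifecycle; the class name, main method, configuration object and server string are illustrative only (they are not part of this commit), and the online() call assumes a running cluster so the .META. table is actually reachable:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionHistorian;

public class RegionHistorianUsageSketch {
  public static void main(String[] args) {
    // Illustrative configuration; the master and regionserver pass their own
    // (master.getConfiguration() / this.conf) as shown in the hunks below.
    HBaseConfiguration conf = new HBaseConfiguration();

    // The Singleton starts offline: getInstance() works even before .META.
    // is deployed, but add* calls silently do nothing.
    RegionHistorian historian = RegionHistorian.getInstance();
    historian.addRegionCreation(HRegionInfo.ROOT_REGIONINFO); // no-op while offline

    // Once .META. is known to be deployed, online the historian so that
    // subsequent add* calls write history entries to the .META. table.
    historian.online(conf);
    if (historian.isOnline()) {
      // Instance call replacing the old static RegionHistorian.addRegionAssignment(...);
      // the historian deliberately skips meta regions, so this particular call
      // only demonstrates the new API shape.
      historian.addRegionAssignment(HRegionInfo.FIRST_META_REGIONINFO,
        "server.example.org:60020");
    }

    // The master and regionserver take the historian offline again on shutdown.
    historian.offline();
  }
}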


@@ -407,7 +407,8 @@ abstract class BaseScanner extends Chore implements HConstants {
       if (master.fs.exists(logDir)) {
         regionManager.splitLogLock.lock();
         try {
-          HLog.splitLog(master.rootdir, logDir, master.fs, master.conf);
+          HLog.splitLog(master.rootdir, logDir, master.fs,
+            master.getConfiguration());
         } finally {
           regionManager.splitLogLock.unlock();
         }


@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -100,7 +101,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   volatile boolean shutdownRequested = false;
   volatile boolean fsOk = true;
   final Path rootdir;
-  final HBaseConfiguration conf;
+  private final HBaseConfiguration conf;
   final FileSystem fs;
   final Random rand;
   final int threadWakeFrequency;
@@ -204,24 +205,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
       }
       if (!fs.exists(rootRegionDir)) {
-        LOG.info("BOOTSTRAP: creating ROOT and first META regions");
-        try {
-          HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO,
-            this.rootdir, this.conf);
-          HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-            this.rootdir, this.conf);
-          // Add first region from the META table to the ROOT region.
-          HRegion.addRegionToMETA(root, meta);
-          root.close();
-          root.getLog().closeAndDelete();
-          meta.close();
-          meta.getLog().closeAndDelete();
-        } catch (IOException e) {
-          e = RemoteExceptionHandler.checkIOException(e);
-          LOG.error("bootstrap", e);
-          throw e;
-        }
+        bootstrap();
       }
     } catch (IOException e) {
       LOG.fatal("Not starting HMaster because:", e);
@@ -247,15 +231,36 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
       conf.getInt("hbase.master.meta.thread.rescanfrequency", 60 * 1000);
     this.sleeper = new Sleeper(this.threadWakeFrequency, this.closed);
     serverManager = new ServerManager(this);
     regionManager = new RegionManager(this);
     // We're almost open for business
     this.closed.set(false);
     LOG.info("HMaster initialized on " + this.address.toString());
   }
+  private void bootstrap() throws IOException {
+    LOG.info("BOOTSTRAP: creating ROOT and first META regions");
+    try {
+      HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO,
+        this.rootdir, this.conf);
+      HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
+        this.rootdir, this.conf);
+      // Add first region from the META table to the ROOT region.
+      HRegion.addRegionToMETA(root, meta);
+      root.close();
+      root.getLog().closeAndDelete();
+      meta.close();
+      meta.getLog().closeAndDelete();
+    } catch (IOException e) {
+      e = RemoteExceptionHandler.checkIOException(e);
+      LOG.error("bootstrap", e);
+      throw e;
+    }
+  }
   /**
    * Checks to see if the file system is still accessible.
    * If not, sets closed
@@ -363,6 +368,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     /*
      * Clean up and close up shop
      */
+    RegionHistorian.getInstance().offline();
     if (this.infoServer != null) {
       LOG.info("Stopping infoServer");
       try {


@@ -62,6 +62,8 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
   protected boolean process() throws IOException {
     Boolean result =
       new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
+        private final RegionHistorian historian = RegionHistorian.getInstance();
         public Boolean call() throws IOException {
           LOG.info(regionInfo.getRegionNameAsString() + " open on " +
             serverAddress.toString());
@@ -82,7 +84,13 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
           b.put(COL_SERVER, Bytes.toBytes(serverAddress.toString()));
           b.put(COL_STARTCODE, startCode);
           server.batchUpdate(metaRegionName, b);
-          RegionHistorian.addRegionOpen(regionInfo, serverAddress);
+          if (!this.historian.isOnline()) {
+            // This is safest place to do the onlining of the historian in
+            // the master. When we get to here, we know there is a .META.
+            // for the historian to go against.
+            this.historian.online(this.master.getConfiguration());
+          }
+          this.historian.addRegionOpen(regionInfo, serverAddress);
           if (isMetaTable) {
             // It's a meta region.
             MetaRegion m = new MetaRegion(serverAddress,
@@ -109,4 +117,4 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
       }.doWithRetries();
     return result == null ? true : result;
   }
-}
+}


@@ -242,7 +242,8 @@ class ProcessServerShutdown extends RegionServerOperation {
         return false;
       }
       try {
-        HLog.splitLog(master.rootdir, oldLogDir, master.fs, master.conf);
+        HLog.splitLog(master.rootdir, oldLogDir, master.fs,
+          master.getConfiguration());
       } finally {
         master.regionManager.splitLogLock.unlock();
       }


@@ -118,12 +118,14 @@ class RegionManager implements HConstants {
   // How many regions to assign a server at a time.
   private final int maxAssignInOneGo;
   private final HMaster master;
+  private final RegionHistorian historian;
   RegionManager(HMaster master) {
     this.master = master;
-    this.maxAssignInOneGo = this.master.conf.
+    this.historian = RegionHistorian.getInstance();
+    this.maxAssignInOneGo = this.master.getConfiguration().
       getInt("hbase.regions.percheckin", 10);
     // The root region
@@ -259,7 +261,7 @@ class RegionManager implements HConstants {
         Bytes.toString(regionInfo.getRegionName())+
         " to server " + serverName);
       unassignedRegions.put(regionInfo, Long.valueOf(now));
-      RegionHistorian.addRegionAssignment(regionInfo, serverName);
+      this.historian.addRegionAssignment(regionInfo, serverName);
       returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo));
       if (--nregions <= 0) {
         break;
@@ -385,7 +387,7 @@ class RegionManager implements HConstants {
         Bytes.toString(regionInfo.getRegionName()) +
         " to the only server " + serverName);
       unassignedRegions.put(regionInfo, Long.valueOf(now));
-      RegionHistorian.addRegionAssignment(regionInfo, serverName);
+      this.historian.addRegionAssignment(regionInfo, serverName);
       returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, regionInfo));
     }
   }
@@ -544,8 +546,8 @@ class RegionManager implements HConstants {
     byte [] metaRegionName)
   throws IOException {
     // 2. Create the HRegion
-    HRegion region =
-      HRegion.createHRegion(newRegion, master.rootdir, master.conf);
+    HRegion region = HRegion.createHRegion(newRegion, master.rootdir,
+      master.getConfiguration());
     // 3. Insert into meta
     HRegionInfo info = region.getRegionInfo();


@@ -80,7 +80,8 @@ class ServerManager implements HConstants {
   public ServerManager(HMaster master) {
     this.master = master;
     serverLeases = new Leases(master.leaseTimeout,
-      master.conf.getInt("hbase.master.lease.thread.wakefrequency", 15 * 1000));
+      master.getConfiguration().getInt("hbase.master.lease.thread.wakefrequency",
+        15 * 1000));
   }
   /**


@@ -109,6 +109,7 @@ public class HRegion implements HConstants {
   static final Random rand = new Random();
   static final Log LOG = LogFactory.getLog(HRegion.class);
   final AtomicBoolean closed = new AtomicBoolean(false);
+  private final RegionHistorian historian;
   /**
    * Merge two HRegions. The regions must be adjacent andmust not overlap.
@@ -429,6 +430,7 @@ public class HRegion implements HConstants {
     String encodedNameStr = Integer.toString(this.regionInfo.getEncodedName());
     this.regiondir = new Path(basedir, encodedNameStr);
     Path oldLogFile = new Path(regiondir, HREGION_OLDLOGFILE_NAME);
+    this.historian = RegionHistorian.getInstance();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region " + this + "/" +
@@ -777,8 +779,8 @@ public class HRegion implements HConstants {
     }
     HRegion regions[] = new HRegion [] {regionA, regionB};
-    RegionHistorian.addRegionSplit(this.regionInfo,
+    this.historian.addRegionSplit(this.regionInfo,
       regionA.getRegionInfo(), regionB.getRegionInfo());
     return regions;
   }
@@ -875,7 +877,7 @@ public class HRegion implements HConstants {
         startTime);
       LOG.info("compaction completed on region " + this + " in " + timeTaken);
-      RegionHistorian.addRegionCompaction(regionInfo, timeTaken);
+      this.historian.addRegionCompaction(regionInfo, timeTaken);
     } finally {
       synchronized (writestate) {
         writestate.compacting = false;
@@ -1055,8 +1057,9 @@ public class HRegion implements HConstants {
         " in " +
         (System.currentTimeMillis() - startTime) + "ms, sequence id=" +
         sequenceId);
-      if (!regionInfo.isMetaRegion())
-        RegionHistorian.addRegionFlush(regionInfo, timeTaken);
+      if (!regionInfo.isMetaRegion()) {
+        this.historian.addRegionFlush(regionInfo, timeTaken);
+      }
     }
     return true;
   }
@@ -1923,14 +1926,17 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-    final HBaseConfiguration conf) throws IOException {
+    final HBaseConfiguration conf)
+  throws IOException {
     Path tableDir =
       HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName());
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(regionDir);
-    if (!info.isMetaRegion())
-      RegionHistorian.addRegionCreation(info);
+    // Note in historian the creation of new region.
+    if (!info.isMetaRegion()) {
+      RegionHistorian.getInstance().addRegionCreation(info);
+    }
     return new HRegion(tableDir,
       new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
       fs, conf, info, null, null);
@@ -1950,7 +1956,8 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public static HRegion openHRegion(final HRegionInfo info, final Path rootDir,
-    final HLog log, final HBaseConfiguration conf) throws IOException {
+    final HLog log, final HBaseConfiguration conf)
+  throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
     }


@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.LeaseListener;
 import org.apache.hadoop.hbase.Leases;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.RegionServerRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
@@ -403,6 +404,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       LOG.fatal("Unhandled exception. Aborting...", t);
       abort();
     }
+    RegionHistorian.getInstance().offline();
     this.leases.closeAfterLeasesExpire();
     this.worker.stop();
     this.server.stop();
@@ -846,9 +848,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
   void openRegion(final HRegionInfo regionInfo) {
+    // If historian is not online and this is not a meta region, online it.
+    if (!regionInfo.isMetaRegion() &&
+        !RegionHistorian.getInstance().isOnline()) {
+      RegionHistorian.getInstance().online(this.conf);
+    }
     Integer mapKey = Bytes.mapKey(regionInfo.getRegionName());
     HRegion region = this.onlineRegions.get(mapKey);
-    if(region == null) {
+    if (region == null) {
       try {
         region = new HRegion(HTableDescriptor.getTableDir(rootDir,
           regionInfo.getTableDesc().getName()),


@@ -78,9 +78,9 @@ public class TestGlobalMemcacheLimit extends HBaseClusterTestCase {
     for (HRegion region : server.getOnlineRegions()) {
       region.flushcache();
     }
-    // make sure we're starting at 0 so that it's easy to predict what the
-    // results of our tests should be.
-    assertEquals("Starting memcache size", 0, server.getGlobalMemcacheSize());
+    // We used to assert that the memsize here was zero but with the addition
+    // of region historian, its no longer true; an entry is added for the
+    // flushes run above.
   }
   /**
@@ -93,8 +93,11 @@ public class TestGlobalMemcacheLimit extends HBaseClusterTestCase {
     // make sure the region server says it is using as much memory as we think
     // it is.
-    assertEquals("Global memcache size", dataSize,
-      server.getGlobalMemcacheSize());
+    // Global cache size is now polluted by region historian data. We used
+    // to be able to do direct compare of global memcache and the data added
+    // but not since HBASE-533 went in. Compare has to be a bit sloppy.
+    assertTrue("Global memcache size",
+      dataSize <= server.getGlobalMemcacheSize());
   }
   /**
@@ -115,8 +118,11 @@ public class TestGlobalMemcacheLimit extends HBaseClusterTestCase {
     int preFlushRows = (int)Math.floor(numRows);
     long dataAdded = populate(table1, preFlushRows, 500);
-    assertEquals("Expected memcache size", dataAdded + startingDataSize,
-      server.getGlobalMemcacheSize());
+    // Global cache size is now polluted by region historian data. We used
+    // to be able to do direct compare of global memcache and the data added
+    // but not since HBASE-533 went in.
+    long cacheSize = server.getGlobalMemcacheSize();
+    assertTrue("Expected memcache size", (dataAdded + startingDataSize) <= cacheSize);
     populate(table1, 2, preFlushRows + 500);
     assertTrue("Post-flush memcache size", server.getGlobalMemcacheSize() <= 1024 * 1024);


@@ -165,14 +165,11 @@ public class TestRegionRebalancing extends HBaseClusterTestCase {
         + " regions. Load Average: " + avg);
       for (HRegionServer server : servers) {
-        LOG.debug(server.hashCode() + " Avg: " + avg + " actual: "
-          + server.getOnlineRegions().size());
         int serverLoad = server.getOnlineRegions().size();
+        LOG.debug(server.hashCode() + " Avg: " + avg + " actual: " + serverLoad);
         if (!(serverLoad <= avg + 2 && serverLoad >= avg - 2)) {
           success = false;
         }
       }
       if (!success) {


@@ -1,10 +1,12 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.util.List"
   import="org.apache.hadoop.hbase.RegionHistorian"
+  import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation"
   import="org.apache.hadoop.hbase.HConstants"%><%
   String regionName = request.getParameter("regionname");
-  List<RegionHistoryInformation> informations = RegionHistorian.getRegionHistory(regionName);
+  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
+  List<RegionHistoryInformation> informations = RegionHistorian.getInstance().getRegionHistory(regionName);
 %><?xml version="1.0" encoding="UTF-8" ?>
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
   "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">