HBASE-993 Turn off logging of every catalog table row entry on every scan

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@713189 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-11-11 21:56:55 +00:00
parent 78ce1b10c9
commit 2966b330ae
8 changed files with 35 additions and 34 deletions


@@ -111,6 +111,7 @@ Release 0.19.0 - Unreleased
HBASE-983 Declare Perl namespace in Hbase.thrift
HBASE-987 We need a Hbase Partitioner for TableMapReduceUtil.initTableReduceJob
MR Jobs (Billy Pearson via Stack)
HBASE-993 Turn off logging of every catalog table row entry on every scan
NEW FEATURES
HBASE-875 Use MurmurHash instead of JenkinsHash [in bloomfilters]


@@ -102,4 +102,4 @@ public class TableMapReduceUtil {
}
}
}
}
}


@@ -154,11 +154,11 @@ abstract class BaseScanner extends Chore implements HConstants {
Map<HRegionInfo, RowResult> splitParents =
new HashMap<HRegionInfo, RowResult>();
List<byte []> emptyRows = new ArrayList<byte []>();
int rows = 0;
try {
regionServer = master.connection.getHRegionConnection(region.getServer());
scannerId = regionServer.openScanner(region.getRegionName(),
COLUMN_FAMILY_ARRAY, EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
int numberOfRegionsFound = 0;
while (true) {
RowResult values = regionServer.next(scannerId);
if (values == null || values.size() == 0) {
@@ -171,20 +171,16 @@ abstract class BaseScanner extends Chore implements HConstants {
}
String serverName = Writables.cellToString(values.get(COL_SERVER));
long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() + " " + info.toString() +
"}, SERVER => '" + serverName + "', STARTCODE => " + startCode);
}
// Note Region has been assigned.
checkAssigned(info, serverName, startCode);
if (isSplitParent(info)) {
splitParents.put(info, values);
}
numberOfRegionsFound += 1;
rows += 1;
}
if (rootRegion) {
regionManager.setNumMetaRegions(numberOfRegionsFound);
regionManager.setNumMetaRegions(rows);
}
} catch (IOException e) {
if (e instanceof RemoteException) {
@@ -226,8 +222,8 @@ abstract class BaseScanner extends Chore implements HConstants {
cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
}
}
LOG.info(Thread.currentThread().getName() + " scan of meta region " +
region.toString() + " complete");
LOG.info(Thread.currentThread().getName() + " scan of " + rows +
" row(s) of meta region " + region.toString() + " complete");
}
/*
@@ -427,4 +423,4 @@ abstract class BaseScanner extends Chore implements HConstants {
}
}
}
}
}
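
The BaseScanner hunks above swap a per-row DEBUG entry for a running row count plus a single summary INFO line at the end of the scan. A minimal standalone illustration of that pattern follows (hypothetical class and method names, commons-logging assumed on the classpath; this is not the actual BaseScanner code):

import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ScanLoggingSketch {
  private static final Log LOG = LogFactory.getLog(ScanLoggingSketch.class);

  // Pretend each String is one catalog-table row returned by a scanner.
  static int scan(String regionName, List<String> rows) {
    int count = 0;
    for (String row : rows) {
      // Per-row output stays behind a debug guard, so a routine scan is quiet.
      if (LOG.isDebugEnabled()) {
        LOG.debug("row => " + row);
      }
      count++;
    }
    // One summary line per scan, in the spirit of "scan of N row(s) ... complete".
    LOG.info("scan of " + count + " row(s) of region " + regionName + " complete");
    return count;
  }

  public static void main(String[] args) {
    scan(".META.,,1", Arrays.asList("region-a", "region-b"));
  }
}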


@@ -97,4 +97,4 @@ public class MetaRegion implements Comparable<MetaRegion> {
}
return result;
}
}
}


@@ -57,15 +57,14 @@ class MetaScanner extends BaseScanner {
// Don't retry if we get an error while scanning. Errors are most often
// caused by the server going away. Wait until next rescan interval when
// things should be back to normal
// things should be back to normal.
private boolean scanOneMetaRegion(MetaRegion region) {
boolean scanSuccessful = false;
while (!master.closed.get() && !regionManager.isInitialRootScanComplete() &&
regionManager.getRootRegionLocation() == null) {
master.sleeper.sleep();
}
if (master.closed.get()) {
return scanSuccessful;
return false;
}
try {
@@ -74,7 +73,6 @@ class MetaScanner extends BaseScanner {
scanRegion(region);
regionManager.putMetaRegionOnline(region);
}
scanSuccessful = true;
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
LOG.warn("Scan one META region: " + region.toString(), e);
@@ -85,7 +83,7 @@ class MetaScanner extends BaseScanner {
if (!regionManager.isMetaRegionOnline(region.getStartKey())) {
LOG.debug("Scanned region is no longer in map of online " +
"regions or its value has changed");
return scanSuccessful;
return false;
}
// Make sure the file system is still available
master.checkFileSystem();
@@ -94,7 +92,7 @@ class MetaScanner extends BaseScanner {
// at least log it rather than go out silently.
LOG.error("Unexpected exception", e);
}
return scanSuccessful;
return true;
}
@Override
@@ -125,22 +123,27 @@ class MetaScanner extends BaseScanner {
@Override
protected void maintenanceScan() {
List<MetaRegion> regions = regionManager.getListOfOnlineMetaRegions();
int regionCount = 0;
for (MetaRegion r: regions) {
scanOneMetaRegion(r);
regionCount++;
}
LOG.info("All " + regionCount + " .META. region(s) scanned");
metaRegionsScanned();
}
/**
/*
* Called by the meta scanner when it has completed scanning all meta
* regions. This wakes up any threads that were waiting for this to happen.
* @param totalRows Total rows scanned.
* @param regionCount Count of regions in .META. table.
* @return False if number of meta regions matches count of online regions.
*/
private synchronized boolean metaRegionsScanned() {
if (!regionManager.isInitialRootScanComplete() ||
regionManager.numMetaRegions() != regionManager.numOnlineMetaRegions()) {
return false;
}
LOG.info("all meta regions scanned");
notifyAll();
return true;
}
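
As the comment in this file notes, a failed scan is not retried inline; the scanner simply reports failure and waits for the next rescan interval. A rough sketch of that scan-once policy (illustrative names only, commons-logging assumed; not the real MetaScanner API):

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ScanOnceSketch {
  private static final Log LOG = LogFactory.getLog(ScanOnceSketch.class);

  // Stand-in for a meta region that can be scanned.
  interface Region {
    String name();
    void scan() throws IOException;
  }

  // Returns true only if the single scan attempt succeeded.
  static boolean scanOne(Region region) {
    try {
      region.scan();
    } catch (IOException e) {
      // No retry here: errors usually mean the server went away, so wait for
      // the next rescan interval rather than looping against a dead server.
      LOG.warn("Scan of " + region.name() + " failed; will retry on next rescan", e);
      return false;
    }
    return true;
  }
}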


@@ -36,14 +36,17 @@ class RootScanner extends BaseScanner {
super(master, regionManager, true, master.metaRescanInterval, master.closed);
}
// Don't retry if we get an error while scanning. Errors are most often
// caused by the server going away. Wait until next rescan interval when
// things should be back to normal
/*
* Don't retry if we get an error while scanning. Errors are most often
* caused by the server going away. Wait until next rescan interval when
* things should be back to normal.
* @return True if successfully scanned.
*/
private boolean scanRoot() {
boolean scanSuccessful = false;
master.waitForRootRegionLocation();
if (master.closed.get()) {
return scanSuccessful;
return false;
}
try {
@@ -54,7 +57,6 @@ class RootScanner extends BaseScanner {
HRegionInfo.ROOT_REGIONINFO.getRegionName()));
}
}
scanSuccessful = true;
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
LOG.warn("Scan ROOT region", e);
@@ -65,12 +67,12 @@ class RootScanner extends BaseScanner {
// at least log it rather than go out silently.
LOG.error("Unexpected exception", e);
}
return scanSuccessful;
return true;
}
@Override
protected boolean initialScan() {
initialScanComplete = scanRoot();
this.initialScanComplete = scanRoot();
return initialScanComplete;
}


@@ -87,7 +87,7 @@ class ServerManager implements HConstants {
master.getConfiguration().getInt("hbase.master.lease.thread.wakefrequency",
15 * 1000));
this.loggingPeriodForAverageLoad = master.getConfiguration().
getLong("hbase.master.avgload.logging.period", 15000);
getLong("hbase.master.avgload.logging.period", 60000);
}
/**
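
The ServerManager hunk raises the default for hbase.master.avgload.logging.period from 15000 ms to 60000 ms. Going by the field name, this period bounds how often the average-load message is written; a minimal sketch of that kind of time-based log throttle is below (hypothetical class, not the actual ServerManager code; commons-logging assumed):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ThrottledLoadLoggerSketch {
  private static final Log LOG = LogFactory.getLog(ThrottledLoadLoggerSketch.class);

  private final long periodMillis; // e.g. the configured 60000 ms default
  private long lastLogTime = 0;

  ThrottledLoadLoggerSketch(long periodMillis) {
    this.periodMillis = periodMillis;
  }

  // Log the average load at most once per configured period.
  void maybeLogAverageLoad(double averageLoad) {
    long now = System.currentTimeMillis();
    if (now - lastLogTime >= periodMillis) {
      LOG.info("Average load: " + averageLoad);
      lastLogTime = now;
    }
  }
}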


@@ -369,7 +369,7 @@ public class HRegion implements HConstants {
// Disable compacting and flushing by background threads for this
// region.
writestate.writesEnabled = false;
LOG.debug("Compactions and cache flushes disabled for region " + this);
LOG.debug("Closing " + this + ": compactions & flushes disabled ");
while (writestate.compacting || writestate.flushing) {
LOG.debug("waiting for" +
(writestate.compacting ? " compaction" : "") +
@@ -384,7 +384,6 @@ public class HRegion implements HConstants {
}
}
newScannerLock.writeLock().lock();
LOG.debug("Scanners disabled for region " + this);
try {
// Wait for active scanners to finish. The write lock we hold will
// prevent new scanners from being created.
@@ -399,9 +398,9 @@ public class HRegion implements HConstants {
}
}
}
LOG.debug("No more active scanners for region " + this);
splitsAndClosesLock.writeLock().lock();
LOG.debug("Updates disabled for region " + this);
LOG.debug("Updates disabled for region, no outstanding scanners on " +
this);
try {
// Write lock means no more row locks can be given out. Wait on
// outstanding row locks to come in before we close so we do not drop
@@ -420,7 +419,7 @@ public class HRegion implements HConstants {
}
this.closed.set(true);
LOG.info("closed " + this);
LOG.info("Closed " + this);
return result;
} finally {
splitsAndClosesLock.writeLock().unlock();