HBASE-1404 minor edit of regionserver logging messages
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@773627 13f79535-47bb-0310-9956-ffa450edef68
parent 39f470727c
commit 4bb48156bc
CHANGES.txt
@@ -220,6 +220,7 @@ Release 0.20.0 - Unreleased
    HBASE-1397  Better distribution in the PerformanceEvaluation MapReduce
                when rows run to the Billions
    HBASE-1393  Narrow synchronization in HLog
+   HBASE-1404  minor edit of regionserver logging messages
 
 OPTIMIZATIONS
 
HLog.java
@@ -545,6 +545,11 @@ public class HLog implements HConstants, Syncable {
           }
         }
       }
+      long took = System.currentTimeMillis() - now;
+      if (took > 1000) {
+        LOG.warn(Thread.currentThread().getName() + " took " + took +
+          "ms optional sync'ing HLog");
+      }
     }
   }
 
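The added lines time the optional sync and warn only when it runs long. A minimal self-contained sketch of the same pattern; `syncToDisk`, the use of `java.util.logging` instead of commons-logging, and the simulated delay are illustrative stand-ins, not the HLog source:

```java
import java.util.logging.Logger;

public class SlowSyncWarning {
  private static final Logger LOG = Logger.getLogger("SlowSyncWarning");
  // Illustrative threshold; the patch uses 1000 ms.
  private static final long WARN_THRESHOLD_MS = 1000;

  // Stand-in for the actual HLog sync work.
  private static void syncToDisk() throws InterruptedException {
    Thread.sleep(1200); // simulate a slow sync
  }

  public static void main(String[] args) throws InterruptedException {
    long now = System.currentTimeMillis();
    syncToDisk();
    // Same shape as the patch: measure elapsed time, warn only when slow.
    long took = System.currentTimeMillis() - now;
    if (took > WARN_THRESHOLD_MS) {
      LOG.warning(Thread.currentThread().getName() + " took " + took +
        "ms optional sync'ing HLog");
    }
  }
}
```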
@@ -730,7 +735,7 @@ public class HLog implements HConstants, Syncable {
       // Check for possibly empty file. With appends, currently Hadoop reports
       // a zero length even if the file has been sync'd. Revisit if
       // HADOOP-4751 is committed.
-      boolean possiblyEmpty = logfiles[i].getLen() <= 0;
+      long length = logfiles[i].getLen();
       HLogKey key = new HLogKey();
       HLogEdit val = new HLogEdit();
       try {
@@ -807,7 +812,8 @@ public class HLog implements HConstants, Syncable {
           fs.delete(logfiles[i].getPath(), true);
         }
       } catch (IOException e) {
-        if (possiblyEmpty) {
+        if (length <= 0) {
+          LOG.warn("Empty log, continuing: " + logfiles[i]);
           continue;
         }
         throw e;
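Together the two HLog hunks replace a precomputed `possiblyEmpty` boolean with the raw file length, checked only when a read actually fails, and log the skip. A sketch of the idea under simplified types; `readLog` and the local `java.io.File` stand in for the HDFS-based originals:

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class EmptyLogSkip {
  // Stand-in for replaying one log file; throws on a truncated/empty file.
  private static void readLog(File f) throws IOException {
    try (FileInputStream in = new FileInputStream(f)) {
      if (in.read() == -1) {
        throw new IOException("unexpected EOF");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    File[] logfiles = { new File("empty.log") };
    for (File logfile : logfiles) {
      // Capture the reported length up front, as the patch does; with
      // appends the filesystem may report zero even after a sync.
      long length = logfile.length();
      try {
        readLog(logfile);
      } catch (IOException e) {
        // Only swallow the failure if the file really looks empty.
        if (length <= 0) {
          System.out.println("Empty log, continuing: " + logfile);
          continue;
        }
        throw e;
      }
    }
  }
}
```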
HRegion.java
@@ -309,11 +309,6 @@ public class HRegion implements HConstants {
 
     // Add one to the current maximum sequence id so new edits are beyond.
     this.minSequenceId = maxSeqId + 1;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Next sequence id for region " +
-        Bytes.toString(regionInfo.getRegionName()) + " is " +
-        this.minSequenceId);
-    }
 
     // Get rid of any splits or merges that were lost in-progress
     FSUtils.deleteDirectory(this.fs, new Path(regiondir, SPLITDIR));
@@ -328,7 +323,7 @@ public class HRegion implements HConstants {
     this.writestate.compacting = false;
     this.lastFlushTime = System.currentTimeMillis();
     LOG.info("region " + this + "/" + this.regionInfo.getEncodedName() +
-      " available");
+      " available; sequence id is " + this.minSequenceId);
   }
 
   /*
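Net effect of the two HRegion hunks above: the standalone sequence-id debug message is gone and the value now rides along on the single "region available" info line. A compressed sketch of the consolidated logging; the field names mirror the diff, but the class around them is invented:

```java
import java.util.logging.Logger;

public class RegionOpenLogging {
  private static final Logger LOG = Logger.getLogger("RegionOpenLogging");

  private final String name;
  private final long minSequenceId;

  RegionOpenLogging(String name, long maxSeqId) {
    this.name = name;
    // Add one to the current maximum sequence id so new edits are beyond.
    this.minSequenceId = maxSeqId + 1;
    // One info line now carries what the removed debug line reported.
    LOG.info("region " + name + " available; sequence id is " +
      this.minSequenceId);
  }

  public static void main(String[] args) {
    new RegionOpenLogging("testtable,,1234567890", 41);
  }
}
```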
@@ -742,8 +737,8 @@ public class HRegion implements HConstants {
         return splitRow;
       }
     }
-    LOG.info("starting " + (majorCompaction? "major" : "") +
-      " compaction on region " + this);
+    LOG.info("Starting" + (majorCompaction? " major " : " ") +
+      "compaction on region " + this);
     long startTime = System.currentTimeMillis();
     doRegionCompactionPrep();
     long maxSize = -1;
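The message rewrite moves the spaces into the ternary arms so both the major and the minor form read cleanly; the same fix recurs in the Store.java hunk further down. A quick standalone check of both spellings, with made-up region names:

```java
public class CompactionMessage {
  static String message(boolean majorCompaction, String region) {
    // Spacing lives inside the ternary, as in the patched code.
    return "Starting" + (majorCompaction ? " major " : " ") +
      "compaction on region " + region;
  }

  public static void main(String[] args) {
    // The old form printed "starting  compaction..." (double space) for
    // minor compactions; the new form yields:
    System.out.println(message(true, "t1,,123"));  // Starting major compaction on region t1,,123
    System.out.println(message(false, "t1,,123")); // Starting compaction on region t1,,123
  }
}
```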
@@ -1315,8 +1310,9 @@ public class HRegion implements HConstants {
     byte [] row = b.getRow();
     // If we did not pass an existing row lock, obtain a new one
     Integer lid = getLock(lockid, row);
+    long now = System.currentTimeMillis();
     long commitTime = b.getTimestamp() == LATEST_TIMESTAMP?
-      System.currentTimeMillis(): b.getTimestamp();
+      now: b.getTimestamp();
     Set<byte []> latestTimestampDeletes = null;
     List<KeyValue> edits = new ArrayList<KeyValue>();
     try {
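The batch-update hunk hoists `System.currentTimeMillis()` into a local so one clock reading serves the whole commit path. A trivial sketch of the pattern; the `LATEST_TIMESTAMP` sentinel value here is illustrative, the real constant lives in HConstants:

```java
public class CommitTimeExample {
  // Sentinel meaning "server should assign the timestamp" (illustrative value).
  static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

  static long commitTime(long requestedTimestamp) {
    // Read the clock once and reuse it, as the patch does.
    long now = System.currentTimeMillis();
    return requestedTimestamp == LATEST_TIMESTAMP ? now : requestedTimestamp;
  }

  public static void main(String[] args) {
    System.out.println(commitTime(LATEST_TIMESTAMP)); // server-assigned
    System.out.println(commitTime(1242000000000L));   // client-supplied
  }
}
```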
MemcacheFlusher.java
@@ -222,16 +222,18 @@ class MemcacheFlusher extends Thread implements FlushRequester {
    * not flushed.
    */
   private boolean flushRegion(HRegion region, boolean removeFromQueue) {
-    // Wait until it is safe to flush
+    // Wait until it is safe to flush.
+    // TODO: Fix. This block doesn't work if more than one store.
     int count = 0;
     boolean triggered = false;
     while (count++ < (blockingWaitTime / 500)) {
       for (Store hstore: region.stores.values()) {
-        if (hstore.getStorefilesCount() > this.blockingStoreFilesNumber) {
+        int files = hstore.getStorefilesCount();
+        if (files > this.blockingStoreFilesNumber) {
           if (!triggered) {
             server.compactSplitThread.compactionRequested(region, getName());
-            LOG.info("Too many store files for region " + region + ": " +
-              hstore.getStorefilesCount() + ", waiting");
+            LOG.info("Too many store files in store " + hstore + ": " +
+              files + ", pausing");
             triggered = true;
           }
           try {
@@ -243,8 +245,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
         }
       }
       if (triggered) {
-        LOG.info("Compaction completed on region " + region +
-          ", proceeding");
+        LOG.info("Compaction triggered on region " + region + ", proceeding");
       }
       break;
     }
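flushRegion's loop bounds how long a flush waits for a compaction to trim the store-file count: poll every 500 ms, at most blockingWaitTime total, log once when the wait starts and once when it proceeds. A self-contained sketch of that loop shape; the thresholds are illustrative and a background thread simulates the compaction shrinking the count:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class BlockingFlushWait {
  static final int BLOCKING_STORE_FILES = 7;   // illustrative threshold
  static final long BLOCKING_WAIT_TIME = 3000; // illustrative, in ms

  public static void main(String[] args) throws InterruptedException {
    AtomicInteger storefiles = new AtomicInteger(9);
    // Stand-in for the compaction thread reducing the store-file count.
    new Thread(() -> {
      try { Thread.sleep(1200); } catch (InterruptedException e) { return; }
      storefiles.set(3);
    }).start();

    int count = 0;
    boolean triggered = false;
    while (count++ < (BLOCKING_WAIT_TIME / 500)) {
      int files = storefiles.get();
      if (files > BLOCKING_STORE_FILES) {
        if (!triggered) {
          // In the patch this also requests a compaction.
          System.out.println("Too many store files in store: " + files + ", pausing");
          triggered = true;
        }
        Thread.sleep(500); // poll interval matching the loop bound
        continue;
      }
      if (triggered) {
        System.out.println("Compaction triggered on region, proceeding");
      }
      break;
    }
    // ...the flush itself would run here once the count is acceptable.
  }
}
```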
Store.java
@@ -204,10 +204,6 @@ public class Store implements HConstants {
 
     // loadStoreFiles calculates this.maxSeqId. as side-effect.
     this.storefiles.putAll(loadStoreFiles());
-    if (LOG.isDebugEnabled() && this.storefiles.size() > 0) {
-      LOG.debug("Loaded " + this.storefiles.size() + " file(s) in Store " +
-        Bytes.toString(this.storeName) + ", max sequence id " + this.maxSeqId);
-    }
 
     // Do reconstruction log.
     runReconstructionLog(reconstructionLog, this.maxSeqId, reporter);
@@ -699,9 +695,9 @@ public class Store implements HConstants {
       // Move the compaction into place.
       completeCompaction(filesToCompact, writer);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Completed " + (majorcompaction? "major": "") +
-          " compaction of " + this.storeNameStr +
-          " store size is " + StringUtils.humanReadableInt(storeSize));
+        LOG.debug("Completed" + (majorcompaction? " major ": " ") +
+          "compaction of " + this.storeNameStr +
+          "; store size is " + StringUtils.humanReadableInt(storeSize));
       }
     }
     return checkSplit(forceSplit);