HBASE-1404 minor edit of regionserver logging messages
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@773627 13f79535-47bb-0310-9956-ffa450edef68
parent 39f470727c
commit 4bb48156bc
@@ -220,6 +220,7 @@ Release 0.20.0 - Unreleased
    HBASE-1397 Better distribution in the PerformanceEvaluation MapReduce
               when rows run to the Billions
    HBASE-1393 Narrow synchronization in HLog
+   HBASE-1404 minor edit of regionserver logging messages

   OPTIMIZATIONS

@@ -545,6 +545,11 @@ public class HLog implements HConstants, Syncable {
         }
       }
     }
+    long took = System.currentTimeMillis() - now;
+    if (took > 1000) {
+      LOG.warn(Thread.currentThread().getName() + " took " + took +
+        "ms optional sync'ing HLog");
+    }
   }
 }
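Note on the hunk above: the new block times the optional sync and warns only when it runs longer than a second, so routine syncs stay out of the logs. Below is a minimal, self-contained sketch of that slow-operation warning pattern; the class name SlowSyncLogging, the SLOW_THRESHOLD_MS constant, and doOptionalSync() are hypothetical names for illustration, not part of HLog.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class SlowSyncLogging {
  private static final Log LOG = LogFactory.getLog(SlowSyncLogging.class);
  // Hypothetical named threshold; the patch hard-codes 1000 ms.
  private static final long SLOW_THRESHOLD_MS = 1000;

  void optionalSync() {
    long now = System.currentTimeMillis();
    doOptionalSync();                      // placeholder for the real sync work
    long took = System.currentTimeMillis() - now;
    if (took > SLOW_THRESHOLD_MS) {
      // Warn only when the sync was unusually slow, mirroring the patch.
      LOG.warn(Thread.currentThread().getName() + " took " + took +
        "ms optional sync'ing HLog");
    }
  }

  private void doOptionalSync() { /* stand-in for the real flush/sync */ }
}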
@@ -730,7 +735,7 @@ public class HLog implements HConstants, Syncable {
       // Check for possibly empty file. With appends, currently Hadoop reports
       // a zero length even if the file has been sync'd. Revisit if
       // HADOOP-4751 is committed.
-      boolean possiblyEmpty = logfiles[i].getLen() <= 0;
+      long length = logfiles[i].getLen();
       HLogKey key = new HLogKey();
       HLogEdit val = new HLogEdit();
       try {

@@ -807,7 +812,8 @@ public class HLog implements HConstants, Syncable {
           fs.delete(logfiles[i].getPath(), true);
         }
       } catch (IOException e) {
-        if (possiblyEmpty) {
+        if (length <= 0) {
+          LOG.warn("Empty log, continuing: " + logfiles[i]);
           continue;
         }
         throw e;
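Note on the two hunks above: the raw file length is captured instead of a boolean, and when replaying a log throws, a zero-length file is now logged with a warning and skipped rather than silently tolerated. A small sketch of that tolerate-empty-files pattern, assuming hypothetical LogFile and Replayer stand-ins rather than the real HLog split code:

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class EmptyLogTolerantReplay {
  private static final Log LOG = LogFactory.getLog(EmptyLogTolerantReplay.class);

  // Hypothetical stand-ins for the file listing and the replay step.
  interface LogFile { String getPath(); long getLen(); }
  interface Replayer { void replay(LogFile f) throws IOException; }

  void replayAll(LogFile[] logfiles, Replayer replayer) throws IOException {
    for (LogFile f : logfiles) {
      long length = f.getLen();            // may be 0 even for sync'd files (HADOOP-4751)
      try {
        replayer.replay(f);
      } catch (IOException e) {
        if (length <= 0) {
          // Zero-length logs are expected with appends; warn and move on.
          LOG.warn("Empty log, continuing: " + f.getPath());
          continue;
        }
        throw e;                           // real failures still propagate
      }
    }
  }
}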
@@ -309,11 +309,6 @@ public class HRegion implements HConstants {

     // Add one to the current maximum sequence id so new edits are beyond.
     this.minSequenceId = maxSeqId + 1;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Next sequence id for region " +
-        Bytes.toString(regionInfo.getRegionName()) + " is " +
-        this.minSequenceId);
-    }

     // Get rid of any splits or merges that were lost in-progress
     FSUtils.deleteDirectory(this.fs, new Path(regiondir, SPLITDIR));
@@ -328,7 +323,7 @@ public class HRegion implements HConstants {
     this.writestate.compacting = false;
     this.lastFlushTime = System.currentTimeMillis();
     LOG.info("region " + this + "/" + this.regionInfo.getEncodedName() +
-      " available");
+      " available; sequence id is " + this.minSequenceId);
   }

   /*
@@ -742,7 +737,7 @@ public class HRegion implements HConstants {
         return splitRow;
       }
     }
-    LOG.info("starting " + (majorCompaction? "major" : "") +
+    LOG.info("Starting" + (majorCompaction? " major " : " ") +
       "compaction on region " + this);
     long startTime = System.currentTimeMillis();
     doRegionCompactionPrep();
@@ -1315,8 +1310,9 @@ public class HRegion implements HConstants {
     byte [] row = b.getRow();
     // If we did not pass an existing row lock, obtain a new one
     Integer lid = getLock(lockid, row);
+    long now = System.currentTimeMillis();
     long commitTime = b.getTimestamp() == LATEST_TIMESTAMP?
-      System.currentTimeMillis(): b.getTimestamp();
+      now: b.getTimestamp();
     Set<byte []> latestTimestampDeletes = null;
     List<KeyValue> edits = new ArrayList<KeyValue>();
     try {
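Note on the hunk above: System.currentTimeMillis() is captured once into a local and reused, so the commit time and any later uses within the same update see the same value. A small illustrative sketch of the idea, not the real HRegion update path; CommitTimeExample and pickTimestamp() are hypothetical, and LATEST_TIMESTAMP is re-declared locally so the example is self-contained:

public class CommitTimeExample {
  // Mirrors the HConstants.LATEST_TIMESTAMP sentinel; re-declared here for the sketch.
  static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

  // Returns the timestamp to use for an edit: the caller-supplied one if set,
  // otherwise a single 'now' captured once per batch.
  static long pickTimestamp(long requested, long now) {
    return requested == LATEST_TIMESTAMP ? now : requested;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();   // captured once, reused for the whole batch
    long explicit = pickTimestamp(1234L, now);
    long defaulted = pickTimestamp(LATEST_TIMESTAMP, now);
    System.out.println("explicit=" + explicit + " defaulted=" + defaulted);
  }
}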
@@ -222,16 +222,18 @@ class MemcacheFlusher extends Thread implements FlushRequester {
    * not flushed.
    */
   private boolean flushRegion(HRegion region, boolean removeFromQueue) {
-    // Wait until it is safe to flush
+    // Wait until it is safe to flush.
+    // TODO: Fix. This block doesn't work if more than one store.
     int count = 0;
     boolean triggered = false;
     while (count++ < (blockingWaitTime / 500)) {
       for (Store hstore: region.stores.values()) {
-        if (hstore.getStorefilesCount() > this.blockingStoreFilesNumber) {
+        int files = hstore.getStorefilesCount();
+        if (files > this.blockingStoreFilesNumber) {
           if (!triggered) {
             server.compactSplitThread.compactionRequested(region, getName());
-            LOG.info("Too many store files for region " + region + ": " +
-              hstore.getStorefilesCount() + ", waiting");
+            LOG.info("Too many store files in store " + hstore + ": " +
+              files + ", pausing");
             triggered = true;
           }
           try {
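Note on the hunk above: the loop it touches is a bounded wait. If a store has too many files, a compaction is requested, an informational message is logged once, and the thread re-checks in 500 ms slices until the backlog clears or blockingWaitTime elapses. A simplified sketch of that pattern under assumed names (tooManyFiles(), requestCompaction(), and the blockingWaitTime default), not the actual MemcacheFlusher logic:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class BoundedWaitSketch {
  private static final Log LOG = LogFactory.getLog(BoundedWaitSketch.class);
  private final long blockingWaitTime = 90 * 1000;   // assumed default, in ms

  void waitUntilSafeToFlush(String region) {
    int count = 0;
    boolean triggered = false;
    while (count++ < (blockingWaitTime / 500)) {
      if (tooManyFiles()) {
        if (!triggered) {
          requestCompaction(region);
          LOG.info("Too many store files, pausing");  // log once, not every 500 ms
          triggered = true;
        }
        try {
          Thread.sleep(500);                          // re-check twice a second
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        continue;
      }
      if (triggered) {
        LOG.info("Compaction triggered on region " + region + ", proceeding");
      }
      break;
    }
  }

  // Hypothetical stand-ins for the store-file check and compaction request.
  private boolean tooManyFiles() { return false; }
  private void requestCompaction(String region) { }
}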
@@ -243,8 +245,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
         }
       }
       if (triggered) {
-        LOG.info("Compaction completed on region " + region +
-          ", proceeding");
+        LOG.info("Compaction triggered on region " + region + ", proceeding");
       }
       break;
     }
@@ -204,10 +204,6 @@ public class Store implements HConstants {

     // loadStoreFiles calculates this.maxSeqId. as side-effect.
     this.storefiles.putAll(loadStoreFiles());
-    if (LOG.isDebugEnabled() && this.storefiles.size() > 0) {
-      LOG.debug("Loaded " + this.storefiles.size() + " file(s) in Store " +
-        Bytes.toString(this.storeName) + ", max sequence id " + this.maxSeqId);
-    }

     // Do reconstruction log.
     runReconstructionLog(reconstructionLog, this.maxSeqId, reporter);
@@ -701,7 +697,7 @@ public class Store implements HConstants {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Completed" + (majorcompaction? " major ": " ") +
         "compaction of " + this.storeNameStr +
-        " store size is " + StringUtils.humanReadableInt(storeSize));
+        "; store size is " + StringUtils.humanReadableInt(storeSize));
     }
   }
   return checkSplit(forceSplit);