HBASE-4694 Some cleanup of log messages in RS and M
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1190669 13f79535-47bb-0310-9956-ffa450edef68
parent bdd4b8d012
commit 8c6f8b4b96
CHANGES.txt
@@ -691,6 +691,7 @@ Release 0.92.0 - Unreleased
               cluster instead of one per method (nkeywal)
    HBASE-3929  Add option to HFile tool to produce basic stats (Matteo
                Bertozzi and todd via todd)
+   HBASE-4694  Some cleanup of log messages in RS and M


 TASKS
src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -258,7 +258,6 @@ public class HFile {
   public static final WriterFactory getWriterFactory(Configuration conf,
       CacheConfig cacheConf) {
     int version = getFormatVersion(conf);
-    LOG.debug("Using HFile format version " + version);
     switch (version) {
     case 1:
       return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Provides functionality to write ({@link BlockIndexWriter}) and read
@@ -750,12 +751,15 @@ public class HFileBlockIndex {
       totalBlockUncompressedSize +=
           blockWriter.getUncompressedSizeWithoutHeader();
 
-      LOG.info("Wrote a " + numLevels + "-level index with root level at pos "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Wrote a " + numLevels + "-level index with root level at pos "
           + out.getPos() + ", " + rootChunk.getNumEntries()
           + " root-level entries, " + totalNumEntries + " total entries, "
-          + totalBlockOnDiskSize + " bytes total on-disk size, "
-          + totalBlockUncompressedSize + " bytes total uncompressed size.");
+          + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
+          " on-disk size, "
+          + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
+          " total uncompressed size.");
+      }
       return rootLevelIndexPos;
     }
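
Note: the hunk above shows the two idioms this commit applies throughout: demoting chatty INFO messages to DEBUG behind an isDebugEnabled() guard (so the string concatenation is skipped entirely when DEBUG is off), and rendering byte counts with Hadoop's StringUtils.humanReadableInt(). A minimal self-contained sketch of the idiom follows; the class name and sizes are hypothetical, only the commons-logging API and org.apache.hadoop.util.StringUtils.humanReadableInt() are taken from the code above.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.StringUtils;

    public class GuardedDebugLog {
      private static final Log LOG = LogFactory.getLog(GuardedDebugLog.class);

      public static void main(String[] args) {
        long onDiskSize = 123456789L;       // hypothetical byte counts
        long uncompressedSize = 987654321L;
        // Guard first: when DEBUG is off, none of the concatenation below runs.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Wrote index, "
              + StringUtils.humanReadableInt(onDiskSize) + " on-disk size, "
              + StringUtils.humanReadableInt(uncompressedSize)
              + " total uncompressed size.");
        }
      }
    }

humanReadableInt() renders 123456789 as something like "117.7m", which keeps the log line readable compared to a raw byte count.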
src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -326,7 +326,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
     if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
       int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name
           + HFile.CACHE_KEY_SEPARATOR);
-      LOG.debug("On close of file " + name + " evicted " + numEvicted
+      LOG.debug("On close, file=" + name + " evicted=" + numEvicted
           + " block(s)");
     }
     if (closeIStream && istream != null) {
src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -178,7 +178,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
     // Meta data block index writer
     metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
 
-    LOG.debug("HFileWriter initialized with " + cacheConf);
+    LOG.debug("Initialized with " + cacheConf);
   }
 
   /**
src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -229,20 +229,6 @@ public class DefaultLoadBalancer implements LoadBalancer {
     NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad =
       new TreeMap<ServerAndLoad, List<HRegionInfo>>();
     int numRegions = 0;
-    StringBuilder strBalanceParam = new StringBuilder("Server information: ");
     // Iterate so we can count regions as we build the map
     for (Map.Entry<ServerName, List<HRegionInfo>> server: clusterState.entrySet()) {
       List<HRegionInfo> regions = server.getValue();
       int sz = regions.size();
       if (sz == 0) emptyRegionServerPresent = true;
       numRegions += sz;
       serversByLoad.put(new ServerAndLoad(server.getKey(), sz), regions);
-      strBalanceParam.append(server.getKey().getServerName()).append("=").
-        append(server.getValue().size()).append(", ");
     }
-    strBalanceParam.delete(strBalanceParam.length() - 2,
-      strBalanceParam.length());
-    LOG.debug(strBalanceParam.toString());
-
     // Check if we even need to do any load balancing
     float average = (float)numRegions / numServers; // for logging
@@ -262,13 +248,13 @@ public class DefaultLoadBalancer implements LoadBalancer {
     int min = numRegions / numServers;
     int max = numRegions % numServers == 0 ? min : min + 1;
 
-    // Using to check banance result.
-    strBalanceParam.delete(0, strBalanceParam.length());
+    // Using to check balance result.
+    StringBuilder strBalanceParam = new StringBuilder();
     strBalanceParam.append("Balance parameter: numRegions=").append(numRegions)
       .append(", numServers=").append(numServers).append(", max=").append(max)
       .append(", min=").append(min);
     LOG.debug(strBalanceParam.toString());
 
     // Balance the cluster
     // TODO: Look at data block locality or a more complex load to do this
     MinMaxPriorityQueue<RegionPlan> regionsToMove =
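
Note: for context on the figures in the "Balance parameter" message, min and max are the per-server floor and ceiling of a perfectly balanced cluster, from plain integer arithmetic. A hypothetical worked example (the cluster numbers are invented):

    public class BalanceBounds {
      public static void main(String[] args) {
        int numRegions = 10, numServers = 3;  // hypothetical cluster
        int min = numRegions / numServers;    // 3: floor of the average load
        int max = numRegions % numServers == 0 ? min : min + 1;  // 4: ceiling
        System.out.println("Balance parameter: numRegions=" + numRegions +
          ", numServers=" + numServers + ", max=" + max + ", min=" + min);
      }
    }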
src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -85,7 +85,7 @@ class LogRoller extends HasThread implements WALActionsListener {
         LOG.debug("Hlog roll period " + this.rollperiod + "ms elapsed");
       }
     } else if (LOG.isDebugEnabled()) {
-      LOG.debug("HLog roll manually triggered");
+      LOG.debug("HLog roll requested");
     }
     rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
     try {
src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -173,25 +173,27 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
 
     HRegion regionToFlush;
     if (bestFlushableRegion != null &&
         bestAnyRegion.memstoreSize.get() > 2 * bestFlushableRegion.memstoreSize.get()) {
       // Even if it's not supposed to be flushed, pick a region if it's more than twice
       // as big as the best flushable one - otherwise when we're under pressure we make
       // lots of little flushes and cause lots of compactions, etc, which just makes
       // life worse!
-      LOG.info("Under global heap pressure: " +
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Under global heap pressure: " +
           "Region " + bestAnyRegion.getRegionNameAsString() + " has too many " +
           "store files, but is " +
           StringUtils.humanReadableInt(bestAnyRegion.memstoreSize.get()) +
           " vs best flushable region's " +
           StringUtils.humanReadableInt(bestFlushableRegion.memstoreSize.get()) +
           ". Choosing the bigger.");
-      regionToFlush = bestAnyRegion;
+      }
+      regionToFlush = bestAnyRegion;
     } else {
       if (bestFlushableRegion == null) {
         regionToFlush = bestAnyRegion;
       } else {
         regionToFlush = bestFlushableRegion;
       }
     }
 
     Preconditions.checkState(regionToFlush.memstoreSize.get() > 0);
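
Note: the decision being logged above picks the overall-largest memstore only when it is more than twice the size of the largest "flushable" one; otherwise repeated small flushes would trigger a storm of compactions. A minimal sketch of that rule with invented sizes:

    public class FlushChoice {
      public static void main(String[] args) {
        long bestAny = 256L << 20;        // hypothetical: largest memstore overall
        long bestFlushable = 100L << 20;  // hypothetical: largest flushable memstore
        // Prefer the overall largest only when it dwarfs the flushable candidate.
        String choice = bestAny > 2 * bestFlushable
            ? "bestAnyRegion" : "bestFlushableRegion";
        System.out.println("Flushing " + choice);  // prints "Flushing bestAnyRegion"
      }
    }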
@@ -216,7 +218,8 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
       fqe = flushQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS);
       if (fqe == null || fqe instanceof WakeupFlushThread) {
         if (isAboveLowWaterMark()) {
-          LOG.info("Flush thread woke up with memory above low water.");
+          LOG.debug("Flush thread woke up because memory above low water=" +
+            StringUtils.humanReadableInt(this.globalMemStoreLimitLowMark));
           if (!flushOneForGlobalPressure()) {
             // Wasn't able to flush any region, but we're above low water mark
             // This is unlikely to happen, but might happen when closing the
src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -390,7 +390,7 @@ public class Store implements HeapSize {
     }
 
     Path dstPath = StoreFile.getRandomFilename(fs, homedir);
-    LOG.info("Renaming bulk load file " + srcPath + " to " + dstPath);
+    LOG.debug("Renaming bulk load file " + srcPath + " to " + dstPath);
     StoreFile.rename(fs, srcPath, dstPath);
 
     StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
@@ -573,7 +573,7 @@ public class Store implements HeapSize {
     Path dstPath = new Path(homedir, fileName);
     validateStoreFile(path);
     String msg = "Renaming flushed file at " + path + " to " + dstPath;
-    LOG.info(msg);
+    LOG.debug(msg);
     status.setStatus("Flushing " + this + ": " + msg);
     if (!fs.rename(path, dstPath)) {
       LOG.warn("Unable to rename " + path + " to " + dstPath);
@@ -593,7 +593,7 @@ public class Store implements HeapSize {
     // the flushing through the StoreFlusherImpl class
     HRegion.incrNumericPersistentMetric("cf." + this.toString() + ".flushSize",
         flushedSize.longValue());
-    if(LOG.isInfoEnabled()) {
+    if (LOG.isInfoEnabled()) {
       LOG.info("Added " + sf + ", entries=" + r.getEntries() +
         ", sequenceid=" + logCacheFlushId +
         ", filesize=" + StringUtils.humanReadableInt(r.length()));
@@ -720,7 +720,7 @@ public class Store implements HeapSize {
     LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
         + this.storeNameStr + " of "
         + this.region.getRegionInfo().getRegionNameAsString()
-        + " into " + region.getTmpDir() + ", seqid=" + maxId + ", totalSize="
+        + " into tmpdir=" + region.getTmpDir() + ", seqid=" + maxId + ", totalSize="
         + StringUtils.humanReadableInt(cr.getSize()));
 
     StoreFile sf = null;
@@ -741,8 +741,9 @@ public class Store implements HeapSize {
     LOG.info("Completed" + (cr.isMajor() ? " major " : " ") + "compaction of "
         + filesToCompact.size() + " file(s) in " + this.storeNameStr + " of "
         + this.region.getRegionInfo().getRegionNameAsString()
-        + "; new storefile name=" + (sf == null ? "none" : sf.toString())
-        + ", size=" + (sf == null ? "none" :
+        + " into " +
+        (sf == null ? "none" : sf.getPath().getName()) +
+        ", size=" + (sf == null ? "none" :
         StringUtils.humanReadableInt(sf.getReader().length()))
         + "; total size for store is "
         + StringUtils.humanReadableInt(storeSize));
src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.regionserver.compactions;
 
 import java.io.IOException;
-import java.util.Date;
 import java.util.List;
 import java.util.concurrent.RejectedExecutionHandler;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -199,7 +198,7 @@ public class CompactionRequest implements Comparable<CompactionRequest>,
         server.checkFileSystem();
       } finally {
         s.finishRequest(this);
-        LOG.debug("CompactSplitThread Status: " + server.compactSplitThread);
+        LOG.debug("CompactSplitThread status: " + server.compactSplitThread);
       }
     }
src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -561,9 +561,6 @@ public class HLog implements Syncable {
       }
       this.filenum = System.currentTimeMillis();
       Path newPath = computeFilename();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Enabling new writer for "+FSUtils.getPath(newPath));
-      }
 
       // Tell our listeners that a new log is about to be created
       if (!this.listeners.isEmpty()) {
@@ -597,7 +594,7 @@ public class HLog implements Syncable {
           this.numEntries.get() +
           ", filesize=" +
           this.fs.getFileStatus(oldFile).getLen() + ". ": "") +
-          "New hlog " + FSUtils.getPath(newPath));
+          " for " + FSUtils.getPath(newPath));
         this.numEntries.set(0);
       }
       // Can we delete any of the old log files?
src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
@@ -93,8 +93,9 @@ public class SequenceFileLogWriter implements HLog.Writer {
     this.writer_out = getSequenceFilePrivateFSDataOutputStreamAccessible();
     this.syncFs = getSyncFs();
     this.hflush = getHFlush();
-    String msg =
-      "syncFs=" + (this.syncFs != null) + ", hflush=" + (this.hflush != null);
+    String msg = "Path=" + path +
+      ", syncFs=" + (this.syncFs != null) +
+      ", hflush=" + (this.hflush != null);
     if (this.syncFs != null || this.hflush != null) {
       LOG.debug(msg);
     } else {
src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -173,12 +173,12 @@ public final class BloomFilterFactory {
       CacheConfig cacheConf, BloomType bloomType, int maxKeys,
       HFile.Writer writer) {
     if (!isGeneralBloomEnabled(conf)) {
-      LOG.info("Bloom filters are disabled by configuration for "
+      LOG.debug("Bloom filters are disabled by configuration for "
           + writer.getPath()
          + (conf == null ? " (configuration is null)" : ""));
       return null;
     } else if (bloomType == BloomType.NONE) {
-      LOG.info("Bloom filter is turned off for the column family");
+      LOG.debug("Bloom filter is turned off for the column family");
       return null;
     }