HBASE-8608 Do an edit of logs.. we log too much.
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1485902 13f79535-47bb-0310-9956-ffa450edef68
parent 61870830bf
commit bd7b479807

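Every hunk below applies the same idiom: chatty DEBUG/INFO statements are demoted to TRACE (or their wording tightened), and the message construction is wrapped in an isTraceEnabled() check so the string concatenation is only paid when trace logging is actually on. A minimal sketch of that idiom with commons-logging, the logging API used in these files; the class name and message are illustrative only, not part of the patch:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ExampleFlusher {
  private static final Log LOG = LogFactory.getLog(ExampleFlusher.class);

  void flush(long flushedBytes) {
    // Before: always builds the message, even when DEBUG output is discarded.
    // LOG.debug("Flushed, memsize=" + flushedBytes);

    // After: demoted to TRACE and guarded, so the concatenation only runs
    // when trace logging is enabled for this logger.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Flushed, memsize=" + flushedBytes);
    }
  }
}
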
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;

@@ -143,7 +142,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
 
     // Meta data block index writer
     metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
-    LOG.debug("Initialized with " + cacheConf);
+    if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
   }
 
   /**

@@ -239,7 +239,7 @@ public abstract class TableLockManager {
     if (data == null) {
       return;
     }
-    LOG.debug("Table is locked by: " +
+    LOG.debug("Table is locked by " +
         String.format("[tableName=%s, lockOwner=%s, threadId=%s, " +
           "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()),
           ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),

@@ -270,9 +270,9 @@ public abstract class TableLockManager {
 
     @Override
     public void acquire() throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Attempt to acquire table " + (isShared ? "read" : "write")
-          + " lock on :" + tableNameStr + " for:" + purpose);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Attempt to acquire table " + (isShared ? "read" : "write") +
+          " lock on: " + tableNameStr + " for:" + purpose);
       }
 
       lock = createTableLock();

@@ -292,15 +292,15 @@ public abstract class TableLockManager {
         Thread.currentThread().interrupt();
         throw new InterruptedIOException("Interrupted acquiring a lock");
       }
-      LOG.debug("Acquired table " + (isShared ? "read" : "write")
-        + " lock on :" + tableNameStr + " for:" + purpose);
+      if (LOG.isTraceEnabled()) LOG.trace("Acquired table " + (isShared ? "read" : "write")
+        + " lock on " + tableNameStr + " for " + purpose);
     }
 
     @Override
     public void release() throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Attempt to release table " + (isShared ? "read" : "write")
-          + " lock on :" + tableNameStr);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Attempt to release table " + (isShared ? "read" : "write")
+          + " lock on " + tableNameStr);
       }
       if (lock == null) {
         throw new IllegalStateException("Table " + tableNameStr +

@@ -314,8 +314,8 @@ public abstract class TableLockManager {
         Thread.currentThread().interrupt();
         throw new InterruptedIOException();
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Released table lock on :" + tableNameStr);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Released table lock on " + tableNameStr);
       }
     }

@@ -86,7 +86,9 @@ public class DefaultStoreFlusher extends StoreFlusher {
       scanner.close();
     }
     LOG.info("Flushed, sequenceid=" + cacheFlushId +", memsize="
-        + StringUtils.humanReadableInt(flushed) +", into tmp file " + writer.getPath());
+        + StringUtils.humanReadableInt(flushed) +
+        ", hasBloomFilter=" + writer.hasGeneralBloom() +
+        ", into tmp file " + writer.getPath());
     result.add(writer.getPath());
     return result;
   }

@@ -1507,10 +1507,10 @@ public class HRegion implements HeapSize { // , Writable{
     } finally {
       this.updatesLock.writeLock().unlock();
     }
-    String s = "Finished snapshotting " + this +
-      ", commencing wait for mvcc, flushsize=" + flushsize;
+    String s = "Finished memstore snapshotting " + this +
+      ", syncing WAL and waiting on mvcc, flushsize=" + flushsize;
     status.setStatus(s);
-    LOG.debug(s);
+    if (LOG.isTraceEnabled()) LOG.trace(s);
 
     // sync unflushed WAL changes when deferred log sync is enabled
     // see HBASE-8208 for details

@@ -1525,8 +1525,9 @@ public class HRegion implements HeapSize { // , Writable{
     // were removed via a rollbackMemstore could be written to Hfiles.
     mvcc.waitForRead(w);
 
-    status.setStatus("Flushing stores");
-    LOG.debug("Finished snapshotting, commencing flushing stores");
+    s = "Flushing stores of " + this;
+    status.setStatus(s);
+    if (LOG.isTraceEnabled()) LOG.trace(s);
 
     // Any failure from here on out will be catastrophic requiring server
     // restart so hlog content can be replayed and put back into the memstore.

@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.DataInput;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;

@@ -36,14 +35,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;

@@ -808,8 +805,8 @@ public class StoreFile {
 
     if (generalBloomFilterWriter != null) {
       this.bloomType = bloomType;
-      LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
-          + generalBloomFilterWriter.getClass().getSimpleName());
+      if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
+        this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
     } else {
       // Not using Bloom filters.
       this.bloomType = BloomType.NONE;

@@ -825,7 +822,7 @@ public class StoreFile {
       deleteFamilyBloomFilterWriter = null;
     }
     if (deleteFamilyBloomFilterWriter != null) {
-      LOG.info("Delete Family Bloom filter type for " + path + ": "
+      if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
           + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
     }
     this.checksumType = checksumType;

@@ -1045,9 +1042,11 @@ public class StoreFile {
 
     // Log final Bloom filter statistics. This needs to be done after close()
     // because compound Bloom filters might be finalized as part of closing.
-    StoreFile.LOG.info((hasGeneralBloom ? "" : "NO ") + "General Bloom and "
-        + (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily"
-        + " was added to HFile (" + getPath() + ") ");
+    if (StoreFile.LOG.isTraceEnabled()) {
+      StoreFile.LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " +
+        (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " +
+        getPath());
+    }
 
   }

@@ -1424,9 +1423,11 @@ public class StoreFile {
         } else {
           generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta,
               reader);
-          LOG.info("Loaded " + bloomFilterType.toString() + " ("
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Loaded " + bloomFilterType.toString() + " "
               + generalBloomFilter.getClass().getSimpleName()
-              + ") metadata for " + reader.getName());
+              + " metadata for " + reader.getName());
+          }
         }
       }
     } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {

@@ -347,7 +347,7 @@ class FSHLog implements HLog, Syncable {
 
     this.logSyncer = new LogSyncer(this.optionalFlushInterval);
 
-    LOG.info("HLog configuration: blocksize=" +
+    LOG.info("WAL/HLog configuration: blocksize=" +
       StringUtils.byteDesc(this.blocksize) +
       ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
       ", enabled=" + this.enabled +

@@ -519,9 +519,10 @@ class FSHLog implements HLog, Syncable {
       this.hdfs_out = nextHdfsOut;
       this.numEntries.set(0);
     }
-    LOG.info("Rolled log" + (oldFile != null ? " for file=" + FSUtils.getPath(oldFile)
-      + ", entries=" + oldNumEntries + ", filesize=" + this.fs.getFileStatus(oldFile).getLen()
-      : "" ) + "; new path=" + FSUtils.getPath(newPath));
+    LOG.info("Rolled WAL " + (oldFile != null ?
+      FSUtils.getPath(oldFile) + ", entries=" + oldNumEntries + ", filesize=" +
+      StringUtils.humanReadableInt(this.fs.getFileStatus(oldFile).getLen()):
+      "" ) + "; new WAL=" + FSUtils.getPath(newPath));
 
     // Tell our listeners that a new log was created
     if (!this.listeners.isEmpty()) {

@@ -87,7 +87,9 @@ public class ProtobufLogWriter implements HLog.Writer {
     }
     // instantiate trailer to default value.
     trailer = WALTrailer.newBuilder().build();
-    LOG.debug("Writing protobuf WAL; path=" + path + ", compression=" + doCompress);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Initialized protobuf WAL=" + path + ", compression=" + doCompress);
+    }
   }
 
   @Override

@@ -135,8 +135,8 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
 
       chunk.compactBloom();
 
-      if (LOG.isDebugEnabled() && prevByteSize != chunk.getByteSize()) {
-        LOG.debug("Compacted Bloom chunk #" + readyChunk.chunkId + " from ["
+      if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) {
+        LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from ["
             + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to ["
             + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize()
             + " bytes]");

@@ -322,11 +322,11 @@ public abstract class FSUtils {
    * @return output stream to the created file
    * @throws IOException if the file cannot be created
    */
   @SuppressWarnings("deprecation")
   public static FSDataOutputStream create(FileSystem fs, Path path,
       FsPermission perm, boolean overwrite) throws IOException {
-    LOG.debug("Creating file=" + path + " with permission=" + perm);
-
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
+    }
     return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
         getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
   }