commit bd7b479807
parent 61870830bf
Author: Michael Stack
Date:   2013-05-24 00:11:00 +00:00

    HBASE-8608 Do an edit of logs.. we log too much.

    git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1485902 13f79535-47bb-0310-9956-ffa450edef68

9 changed files with 46 additions and 40 deletions
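
The recurring change in this commit is demoting chatty DEBUG/INFO messages to TRACE and wrapping them in a level check, so the message string is never built when tracing is off. A minimal sketch of that guard pattern with commons-logging as HBase uses it (the class and method names below are illustrative, not taken from the patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedLoggingExample {
      private static final Log LOG = LogFactory.getLog(GuardedLoggingExample.class);

      void flush(long sequenceId, long memSize) {
        // Unguarded, the argument is concatenated (and any toString() calls run)
        // on every invocation, even when TRACE is disabled and the message is
        // thrown away:
        // LOG.trace("Flushed, sequenceid=" + sequenceId + ", memsize=" + memSize);

        // Guarded, isTraceEnabled() is a cheap boolean check, so the string is
        // only built when tracing is actually on:
        if (LOG.isTraceEnabled()) {
          LOG.trace("Flushed, sequenceid=" + sequenceId + ", memsize=" + memSize);
        }
      }
    }

Messages that matter operationally, such as the flush and WAL-roll summaries below, stay at INFO and unguarded, since they are meant to be emitted in normal operation.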

View File: HFileWriterV2.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
@@ -143,7 +142,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
     // Meta data block index writer
     metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
-    LOG.debug("Initialized with " + cacheConf);
+    if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
   }

   /**

View File: TableLockManager.java

@@ -239,7 +239,7 @@ public abstract class TableLockManager {
     if (data == null) {
       return;
     }
-    LOG.debug("Table is locked by: " +
+    LOG.debug("Table is locked by " +
         String.format("[tableName=%s, lockOwner=%s, threadId=%s, " +
         "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()),
         ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
@@ -270,9 +270,9 @@ public abstract class TableLockManager {
     @Override
     public void acquire() throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Attempt to acquire table " + (isShared ? "read" : "write")
-          + " lock on :" + tableNameStr + " for:" + purpose);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Attempt to acquire table " + (isShared ? "read" : "write") +
+          " lock on: " + tableNameStr + " for:" + purpose);
       }
       lock = createTableLock();
@@ -292,15 +292,15 @@ public abstract class TableLockManager {
         Thread.currentThread().interrupt();
         throw new InterruptedIOException("Interrupted acquiring a lock");
       }
-      LOG.debug("Acquired table " + (isShared ? "read" : "write")
-        + " lock on :" + tableNameStr + " for:" + purpose);
+      if (LOG.isTraceEnabled()) LOG.trace("Acquired table " + (isShared ? "read" : "write")
+        + " lock on " + tableNameStr + " for " + purpose);
     }

     @Override
     public void release() throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Attempt to release table " + (isShared ? "read" : "write")
-          + " lock on :" + tableNameStr);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Attempt to release table " + (isShared ? "read" : "write")
+          + " lock on " + tableNameStr);
       }
       if (lock == null) {
         throw new IllegalStateException("Table " + tableNameStr +
@@ -314,8 +314,8 @@ public abstract class TableLockManager {
         Thread.currentThread().interrupt();
         throw new InterruptedIOException();
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Released table lock on :" + tableNameStr);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Released table lock on " + tableNameStr);
       }
     }

View File: DefaultStoreFlusher.java

@@ -86,7 +86,9 @@ public class DefaultStoreFlusher extends StoreFlusher {
       scanner.close();
     }
-    LOG.info("Flushed, sequenceid=" + cacheFlushId +", memsize="
-        + StringUtils.humanReadableInt(flushed) +", into tmp file " + writer.getPath());
+    LOG.info("Flushed, sequenceid=" + cacheFlushId +", memsize="
+        + StringUtils.humanReadableInt(flushed) +
+        ", hasBloomFilter=" + writer.hasGeneralBloom() +
+        ", into tmp file " + writer.getPath());
     result.add(writer.getPath());
     return result;
   }

View File: HRegion.java

@@ -1507,10 +1507,10 @@ public class HRegion implements HeapSize { // , Writable{
     } finally {
       this.updatesLock.writeLock().unlock();
     }
-    String s = "Finished snapshotting " + this +
-      ", commencing wait for mvcc, flushsize=" + flushsize;
+    String s = "Finished memstore snapshotting " + this +
+      ", syncing WAL and waiting on mvcc, flushsize=" + flushsize;
     status.setStatus(s);
-    LOG.debug(s);
+    if (LOG.isTraceEnabled()) LOG.trace(s);

     // sync unflushed WAL changes when deferred log sync is enabled
     // see HBASE-8208 for details
@@ -1525,8 +1525,9 @@ public class HRegion implements HeapSize { // , Writable{
     // were removed via a rollbackMemstore could be written to Hfiles.
     mvcc.waitForRead(w);

-    status.setStatus("Flushing stores");
-    LOG.debug("Finished snapshotting, commencing flushing stores");
+    s = "Flushing stores of " + this;
+    status.setStatus(s);
+    if (LOG.isTraceEnabled()) LOG.trace(s);

     // Any failure from here on out will be catastrophic requiring server
     // restart so hlog content can be replayed and put back into the memstore.

View File: StoreFile.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.regionserver;

 import java.io.DataInput;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
@@ -36,14 +35,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -808,8 +805,8 @@ public class StoreFile {
     if (generalBloomFilterWriter != null) {
       this.bloomType = bloomType;
-      LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
-          + generalBloomFilterWriter.getClass().getSimpleName());
+      if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
+          this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
     } else {
       // Not using Bloom filters.
       this.bloomType = BloomType.NONE;
@@ -825,7 +822,7 @@ public class StoreFile {
       deleteFamilyBloomFilterWriter = null;
     }
     if (deleteFamilyBloomFilterWriter != null) {
-      LOG.info("Delete Family Bloom filter type for " + path + ": "
+      if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
          + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
     }
     this.checksumType = checksumType;
@@ -1045,9 +1042,11 @@ public class StoreFile {
     // Log final Bloom filter statistics. This needs to be done after close()
     // because compound Bloom filters might be finalized as part of closing.
-    StoreFile.LOG.info((hasGeneralBloom ? "" : "NO ") + "General Bloom and "
-        + (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily"
-        + " was added to HFile (" + getPath() + ") ");
+    if (StoreFile.LOG.isTraceEnabled()) {
+      StoreFile.LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " +
+          (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " +
+          getPath());
+    }
   }
@@ -1424,9 +1423,11 @@ public class StoreFile {
         } else {
           generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta,
               reader);
-          LOG.info("Loaded " + bloomFilterType.toString() + " ("
-              + generalBloomFilter.getClass().getSimpleName()
-              + ") metadata for " + reader.getName());
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Loaded " + bloomFilterType.toString() + " "
+                + generalBloomFilter.getClass().getSimpleName()
+                + " metadata for " + reader.getName());
+          }
         }
       }
     } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {

View File: FSHLog.java

@@ -347,7 +347,7 @@ class FSHLog implements HLog, Syncable {
     this.logSyncer = new LogSyncer(this.optionalFlushInterval);

-    LOG.info("HLog configuration: blocksize=" +
+    LOG.info("WAL/HLog configuration: blocksize=" +
         StringUtils.byteDesc(this.blocksize) +
         ", rollsize=" + StringUtils.byteDesc(this.logrollsize) +
         ", enabled=" + this.enabled +
@@ -519,9 +519,10 @@ class FSHLog implements HLog, Syncable {
         this.hdfs_out = nextHdfsOut;
         this.numEntries.set(0);
       }
-      LOG.info("Rolled log" + (oldFile != null ? " for file=" + FSUtils.getPath(oldFile)
-          + ", entries=" + oldNumEntries + ", filesize=" + this.fs.getFileStatus(oldFile).getLen()
-          : "" ) + "; new path=" + FSUtils.getPath(newPath));
+      LOG.info("Rolled WAL " + (oldFile != null ?
+          FSUtils.getPath(oldFile) + ", entries=" + oldNumEntries + ", filesize=" +
+          StringUtils.humanReadableInt(this.fs.getFileStatus(oldFile).getLen()):
+          "" ) + "; new WAL=" + FSUtils.getPath(newPath));

       // Tell our listeners that a new log was created
       if (!this.listeners.isEmpty()) {

View File: ProtobufLogWriter.java

@@ -87,7 +87,9 @@ public class ProtobufLogWriter implements HLog.Writer {
     }
     // instantiate trailer to default value.
     trailer = WALTrailer.newBuilder().build();
-    LOG.debug("Writing protobuf WAL; path=" + path + ", compression=" + doCompress);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Initialized protobuf WAL=" + path + ", compression=" + doCompress);
+    }
   }

   @Override

View File: CompoundBloomFilterWriter.java

@@ -135,8 +135,8 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
       chunk.compactBloom();

-      if (LOG.isDebugEnabled() && prevByteSize != chunk.getByteSize()) {
-        LOG.debug("Compacted Bloom chunk #" + readyChunk.chunkId + " from ["
+      if (LOG.isTraceEnabled() && prevByteSize != chunk.getByteSize()) {
+        LOG.trace("Compacted Bloom chunk #" + readyChunk.chunkId + " from ["
            + prevMaxKeys + " max keys, " + prevByteSize + " bytes] to ["
            + chunk.getMaxKeys() + " max keys, " + chunk.getByteSize()
            + " bytes]");

View File: FSUtils.java

@@ -322,11 +322,11 @@ public abstract class FSUtils {
    * @return output stream to the created file
    * @throws IOException if the file cannot be created
    */
-  @SuppressWarnings("deprecation")
   public static FSDataOutputStream create(FileSystem fs, Path path,
       FsPermission perm, boolean overwrite) throws IOException {
-    LOG.debug("Creating file=" + path + " with permission=" + perm);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
+    }
     return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
         getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
   }
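
A short operational note, not part of the patch: the demoted messages can still be recovered when debugging by raising the relevant logger to TRACE in the log4j configuration HBase ships with, e.g. a line such as log4j.logger.org.apache.hadoop.hbase.regionserver.StoreFile=TRACE in conf/log4j.properties (logger name taken from the StoreFile package shown in the diff above).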