From fe6bbb0d29476bafbb26b5e693e2eefbf669c649 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Sat, 25 May 2013 06:05:07 +0000
Subject: [PATCH] HBASE-8621 More log edits; we log too much

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1486291 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/hbase/client/ClientScanner.java  |  3 ++-
 .../apache/hadoop/hbase/master/CatalogJanitor.java |  4 ++--
 .../hbase/master/balancer/BaseLoadBalancer.java    |  5 ++++-
 .../hbase/master/handler/CreateTableHandler.java   |  2 +-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |  9 +++------
 .../hadoop/hbase/regionserver/wal/FSHLog.java      | 13 ++++++-------
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  3 ++-
 .../zookeeper/lock/ZKInterProcessLockBase.java     |  4 ++--
 8 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 59047330f63..581e424dc25 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -204,7 +204,8 @@ public class ClientScanner extends AbstractClientScanner {
         localStartKey = this.scan.getStartRow();
       }
 
-      if (LOG.isDebugEnabled()) {
+      if (LOG.isDebugEnabled() && this.currentRegion != null) {
+        // Only worth logging if NOT first region in scan.
         LOG.debug("Advancing internal scanner to startKey at '" +
           Bytes.toStringBinary(localStartKey) + "'");
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index a99dd7e1182..8d5e280c3f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -265,8 +265,8 @@ public class CatalogJanitor extends Chore {
       LOG.info("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned +
         " unreferenced merged region(s) and " + splitCleaned +
         " unreferenced parent region(s)");
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
+    } else if (LOG.isTraceEnabled()) {
+      LOG.trace("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
         + " unreferenced merged region(s) and " + splitCleaned +
         " unreferenced parent region(s)");
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 0e4ae1d1e53..0326c20b6b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -378,11 +378,14 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
     int ceiling = (int) Math.ceil(average * (1 + slop));
     if (!(cs.getMinLoad() > ceiling || cs.getMaxLoad() < floor)) {
       NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
-      LOG.info("Skipping load balancing because balanced cluster; " +
+      if (LOG.isTraceEnabled()) {
+        // If nothing to balance, then don't say anything unless trace-level logging.
+ LOG.trace("Skipping load balancing because balanced cluster; " + "servers=" + cs.getNumServers() + " " + "regions=" + cs.getNumRegions() + " average=" + average + " " + "mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded=" + serversByLoad.firstKey().getLoad()); + } return false; } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index ff4ad21d507..7fe039426d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -143,7 +143,7 @@ public class CreateTableHandler extends EventHandler { @Override public void process() { String tableName = this.hTableDescriptor.getNameAsString(); - LOG.info("Attempting to create the table " + tableName); + LOG.info("Create table " + tableName); try { MasterCoprocessorHost cpHost = ((HMaster) this.server).getCoprocessorHost(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index d88895f60d7..233efb02be4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -27,16 +27,14 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HDFSBlocksDistribution; -import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.HFileLink; -import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.HalfStoreFileReader; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.util.FSUtils; @@ -101,13 +99,12 @@ public class StoreFileInfo { public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus) throws IOException { this.fileStatus = fileStatus; - Path p = fileStatus.getPath(); if (HFileLink.isHFileLink(p)) { // HFileLink this.reference = null; this.link = new HFileLink(conf, p); - LOG.debug("Store file " + p + " is a link"); + if (LOG.isTraceEnabled()) LOG.trace(p + " is a link"); } else if (isReference(p)) { this.reference = Reference.read(fs, p); Path referencePath = getReferredToFile(p); @@ -118,7 +115,7 @@ public class StoreFileInfo { // Reference this.link = null; } - LOG.debug("Store file " + p + " is a " + reference.getFileRegion() + + if (LOG.isTraceEnabled()) LOG.trace(p + " is a " + reference.getFileRegion() + " reference to " + referencePath); } else if (isHFile(p)) { // HFile diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index f62a603be37..31440a7fe4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ 
@@ -519,10 +519,10 @@ class FSHLog implements HLog, Syncable {
         this.hdfs_out = nextHdfsOut;
         this.numEntries.set(0);
       }
-      LOG.info("Rolled WAL " + (oldFile != null ?
-        FSUtils.getPath(oldFile) + ", entries=" + oldNumEntries + ", filesize=" +
-        StringUtils.humanReadableInt(this.fs.getFileStatus(oldFile).getLen()):
-        "" ) + "; new WAL=" + FSUtils.getPath(newPath));
+      if (oldFile == null) LOG.info("New WAL " + FSUtils.getPath(newPath));
+      else LOG.info("Rolled WAL " + FSUtils.getPath(oldFile) + " with entries=" + oldNumEntries +
+        ", filesize=" + StringUtils.humanReadableInt(this.fs.getFileStatus(oldFile).getLen()) +
+        "; new WAL " + FSUtils.getPath(newPath));
 
       // Tell our listeners that a new log was created
       if (!this.listeners.isEmpty()) {
@@ -765,8 +765,7 @@ class FSHLog implements HLog, Syncable {
           }
         }
       }
-      LOG.debug("Moved " + files.length + " log files to " +
-        FSUtils.getPath(this.oldLogDir));
+      LOG.debug("Moved " + files.length + " WAL file(s) to " + FSUtils.getPath(this.oldLogDir));
     }
     if (!fs.delete(dir, true)) {
       LOG.info("Unable to delete " + dir);
@@ -806,7 +805,7 @@ class FSHLog implements HLog, Syncable {
     synchronized (updateLock) {
       this.closed = true;
       if (LOG.isDebugEnabled()) {
-        LOG.debug("closing hlog writer in " + this.dir.toString());
+        LOG.debug("Closing WAL writer in " + this.dir.toString());
       }
       if (this.writer != null) {
         this.writer.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index c6b8c9c67ae..c189a35b451 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -292,7 +292,8 @@ public abstract class FSUtils {
       // Function was properly called, but threw it's own exception.
       throw new IOException(ite.getCause());
     } catch (NoSuchMethodException e) {
-      LOG.debug("Ignoring (most likely Reflection related exception) " + e);
+      LOG.debug("DFS Client does not support most favored nodes create; using default create");
+      if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
     } catch (IllegalArgumentException e) {
       LOG.debug("Ignoring (most likely Reflection related exception) " + e);
     } catch (SecurityException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
index d06800ccd79..d691bd645bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
@@ -223,7 +223,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
       }
     }
     updateAcquiredLock(createdZNode);
-    LOG.debug("Successfully acquired a lock for " + createdZNode);
+    LOG.debug("Acquired a lock for " + createdZNode);
     return true;
   }
 
@@ -325,7 +325,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
         }
      }
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Successfully released " + lock.getPath());
+        LOG.debug("Released " + lock.getPath());
       }
     } catch (BadVersionException e) {
       throw new IllegalStateException(e);
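
The recurring pattern in this patch is to keep routine, per-operation detail out of INFO and DEBUG: messages either move down to TRACE, or the message construction is wrapped in an isTraceEnabled()/isDebugEnabled() check so the string concatenation is skipped when that level is off. The sketch below illustrates the pattern with the same commons-logging API these classes already import (org.apache.commons.logging.Log/LogFactory); the class, method, and messages are hypothetical examples, not code from the patch.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Hypothetical illustration of the guarded-logging pattern applied by this patch.
public class GuardedLoggingExample {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingExample.class);

  void reportScan(int rows, int cleaned) {
    // Per-iteration bookkeeping: log at TRACE and guard the call so the
    // message string is only built when TRACE is actually enabled.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Scanned " + rows + " row(s), cleaned " + cleaned + " region(s)");
    }
    // A genuinely notable event stays at INFO, with a short message.
    if (cleaned > 0) {
      LOG.info("Cleaned " + cleaned + " region(s)");
    }
  }
}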