diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index a996edf79f4..0ca8af03cf7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -92,7 +92,7 @@ public class CompactingMemStore extends AbstractMemStore {
     double factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
         IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
     inmemoryFlushSize *= factor;
-    LOG.debug("Setting in-memory flush size threshold to " + inmemoryFlushSize);
+    LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize);
   }
 
   public static long getSegmentSize(Segment segment) {
@@ -150,8 +150,11 @@ public class CompactingMemStore extends AbstractMemStore {
       LOG.warn("Snapshot called again without clearing previous. " +
           "Doing nothing. Another ongoing flush or did we fail last attempt?");
     } else {
-      LOG.info("FLUSHING TO DISK: region "+ getRegionServices().getRegionInfo()
-          .getRegionNameAsString() + "store: "+ getFamilyName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("FLUSHING TO DISK: region "
+            + getRegionServices().getRegionInfo().getRegionNameAsString() + " store: "
+            + getFamilyName());
+      }
       stopCompaction();
       pushActiveToPipeline(active);
       snapshotId = EnvironmentEdgeManager.currentTime();
@@ -275,8 +278,10 @@ public class CompactingMemStore extends AbstractMemStore {
     getRegionServices().blockUpdates();
     try {
       MutableSegment active = getActive();
-      LOG.info("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline, " +
-          "and initiating compaction.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline, "
+            + "and initiating compaction.");
+      }
       pushActiveToPipeline(active);
     } finally {
       getRegionServices().unblockUpdates();
@@ -398,13 +403,4 @@ public class CompactingMemStore extends AbstractMemStore {
     }
     return lowest;
   }
-
-  // debug method
-  private void debug() {
-    String msg = "active size="+getActive().getSize();
-    msg += " threshold="+IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT* inmemoryFlushSize;
-    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");
-    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");
-    LOG.debug(msg);
-  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index e33ceae58dd..3ecd11c1ba1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -95,10 +95,12 @@ public class CompactionPipeline {
         return false;
       }
       suffix = versionedList.getStoreSegments();
-      LOG.info("Swapping pipeline suffix with compacted item. "
-          +"Just before the swap the number of segments in pipeline is:"
-          +versionedList.getStoreSegments().size()
-          +", and the number of cells in new segment is:"+segment.getCellsCount());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Swapping pipeline suffix with compacted item. "
+            + "Just before the swap the number of segments in pipeline is:"
+            + versionedList.getStoreSegments().size()
+            + ", and the number of cells in new segment is:" + segment.getCellsCount());
+      }
       swapSuffix(suffix,segment);
     }
     if(region != null) {
@@ -107,8 +109,10 @@ public class CompactionPipeline {
       long newSize = CompactingMemStore.getSegmentSize(segment);
       long delta = suffixSize - newSize;
       long globalMemstoreSize = region.addAndGetGlobalMemstoreSize(-delta);
-      LOG.info("Suffix size: "+ suffixSize+" compacted item size: "+newSize+
-          " globalMemstoreSize: "+globalMemstoreSize);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Suffix size: " + suffixSize + " compacted item size: " + newSize
+            + " globalMemstoreSize: " + globalMemstoreSize);
+      }
     }
     return true;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 691ebb9c507..a363e95bbf8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -84,8 +84,10 @@ class MemStoreCompactor {
     smallestReadPoint = compactingMemStore.getSmallestReadPoint();
     compactingScanner = createScanner(compactingMemStore.getStore());
 
-    LOG.info("Starting the MemStore in-memory compaction for store " +
-        compactingMemStore.getStore().getColumnFamilyName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Starting the MemStore in-memory compaction for store "
+          + compactingMemStore.getStore().getColumnFamilyName());
+    }
 
     doCompaction();
     return true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 64352321a5c..dd824c1f95d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -23,7 +23,6 @@ import java.util.SortedSet;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
@@ -42,7 +41,6 @@ import org.apache.hadoop.hbase.util.ByteRange;
 @InterfaceAudience.Private
 public abstract class Segment {
 
-  private static final Log LOG = LogFactory.getLog(Segment.class);
   private volatile CellSet cellSet;
   private final CellComparator comparator;
   private long minSequenceId;