HBASE-16155 Compacting Memstore: Few log improvements.

This commit is contained in:
anoopsjohn 2016-07-01 10:14:07 +05:30
parent d1d8cc71c9
commit 139f0ed53c
4 changed files with 24 additions and 24 deletions

View File

@ -92,7 +92,7 @@ public class CompactingMemStore extends AbstractMemStore {
double factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
inmemoryFlushSize *= factor;
LOG.debug("Setting in-memory flush size threshold to " + inmemoryFlushSize);
LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize);
}
public static long getSegmentSize(Segment segment) {
@ -150,8 +150,11 @@ public class CompactingMemStore extends AbstractMemStore {
LOG.warn("Snapshot called again without clearing previous. " +
"Doing nothing. Another ongoing flush or did we fail last attempt?");
} else {
LOG.info("FLUSHING TO DISK: region "+ getRegionServices().getRegionInfo()
.getRegionNameAsString() + "store: "+ getFamilyName());
if (LOG.isDebugEnabled()) {
LOG.debug("FLUSHING TO DISK: region "
+ getRegionServices().getRegionInfo().getRegionNameAsString() + "store: "
+ getFamilyName());
}
stopCompaction();
pushActiveToPipeline(active);
snapshotId = EnvironmentEdgeManager.currentTime();
@ -275,8 +278,10 @@ public class CompactingMemStore extends AbstractMemStore {
getRegionServices().blockUpdates();
try {
MutableSegment active = getActive();
LOG.info("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline, " +
"and initiating compaction.");
if (LOG.isDebugEnabled()) {
LOG.debug("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline, "
+ "and initiating compaction.");
}
pushActiveToPipeline(active);
} finally {
getRegionServices().unblockUpdates();
@ -398,13 +403,4 @@ public class CompactingMemStore extends AbstractMemStore {
}
return lowest;
}
/**
 * Dumps the memstore's current state (active segment size, in-memory flush
 * threshold, and the compaction/flush flags) to the debug log. Intended for
 * troubleshooting only.
 */
private void debug() {
  StringBuilder state = new StringBuilder("active size=");
  state.append(getActive().getSize())
      .append(" threshold=")
      .append(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT * inmemoryFlushSize)
      // StringBuilder renders booleans as "true"/"false", matching the log format.
      .append(" allow compaction is ")
      .append(allowCompaction.get())
      .append(" inMemoryFlushInProgress is ")
      .append(inMemoryFlushInProgress.get());
  LOG.debug(state.toString());
}
}

View File

@ -95,10 +95,12 @@ public class CompactionPipeline {
return false;
}
suffix = versionedList.getStoreSegments();
LOG.info("Swapping pipeline suffix with compacted item. "
+"Just before the swap the number of segments in pipeline is:"
+versionedList.getStoreSegments().size()
+", and the number of cells in new segment is:"+segment.getCellsCount());
if (LOG.isDebugEnabled()) {
LOG.debug("Swapping pipeline suffix with compacted item. "
+ "Just before the swap the number of segments in pipeline is:"
+ versionedList.getStoreSegments().size()
+ ", and the number of cells in new segment is:" + segment.getCellsCount());
}
swapSuffix(suffix,segment);
}
if(region != null) {
@ -107,8 +109,10 @@ public class CompactionPipeline {
long newSize = CompactingMemStore.getSegmentSize(segment);
long delta = suffixSize - newSize;
long globalMemstoreSize = region.addAndGetGlobalMemstoreSize(-delta);
LOG.info("Suffix size: "+ suffixSize+" compacted item size: "+newSize+
" globalMemstoreSize: "+globalMemstoreSize);
if (LOG.isDebugEnabled()) {
LOG.debug("Suffix size: " + suffixSize + " compacted item size: " + newSize
+ " globalMemstoreSize: " + globalMemstoreSize);
}
}
return true;
}

View File

@ -84,8 +84,10 @@ class MemStoreCompactor {
smallestReadPoint = compactingMemStore.getSmallestReadPoint();
compactingScanner = createScanner(compactingMemStore.getStore());
LOG.info("Starting the MemStore in-memory compaction for store " +
compactingMemStore.getStore().getColumnFamilyName());
if (LOG.isDebugEnabled()) {
LOG.debug("Starting the MemStore in-memory compaction for store "
+ compactingMemStore.getStore().getColumnFamilyName());
}
doCompaction();
return true;

View File

@ -23,7 +23,6 @@ import java.util.SortedSet;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
@ -42,7 +41,6 @@ import org.apache.hadoop.hbase.util.ByteRange;
@InterfaceAudience.Private
public abstract class Segment {
private static final Log LOG = LogFactory.getLog(Segment.class);
private volatile CellSet cellSet;
private final CellComparator comparator;
private long minSequenceId;