HBASE-23037 Make the split WAL related log more readable (#632)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
This commit is contained in:
Guanghao Zhang 2019-09-18 21:44:05 +08:00 committed by GitHub
parent 18ac61091d
commit 25bcc91dab
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 24 additions and 13 deletions

View File

@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.HasThread;
@@ -289,10 +290,12 @@ public class SplitLogManager {
} }
SplitLogCounters.tot_mgr_log_split_batch_success.increment(); SplitLogCounters.tot_mgr_log_split_batch_success.increment();
} }
String msg = "Finished splitting (more than or equal to) " + totalSize + String msg =
" bytes in " + ((batch == null)? 0: batch.installed) + "Finished splitting (more than or equal to) " + StringUtils.humanSize(totalSize) + " (" +
" log files in " + logDirs + " in " + totalSize + " bytes) in " + ((batch == null) ? 0 : batch.installed) + " log files in " +
((startTime == 0)? startTime: (EnvironmentEdgeManager.currentTime() - startTime)) + "ms"; logDirs + " in " +
((startTime == 0) ? startTime : (EnvironmentEdgeManager.currentTime() - startTime)) +
"ms";
status.markComplete(msg); status.markComplete(msg);
LOG.info(msg); LOG.info(msg);
return totalSize; return totalSize;

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.wal;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@@ -73,7 +72,7 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
for (final Map.Entry<byte[], WALSplitter.RegionEntryBuffer> buffer : entryBuffers.buffers for (final Map.Entry<byte[], WALSplitter.RegionEntryBuffer> buffer : entryBuffers.buffers
.entrySet()) { .entrySet()) {
LOG.info("Submitting writeThenClose of {}", LOG.info("Submitting writeThenClose of {}",
Arrays.toString(buffer.getValue().encodedRegionName)); Bytes.toString(buffer.getValue().encodedRegionName));
completionService.submit(new Callable<Void>() { completionService.submit(new Callable<Void>() {
@Override @Override
public Void call() throws Exception { public Void call() throws Exception {
@@ -148,4 +147,4 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
} }
return dst; return dst;
} }
} }

View File

@@ -44,11 +44,13 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.master.SplitLogManager; import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.regionserver.LastSequenceId; import org.apache.hadoop.hbase.regionserver.LastSequenceId;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WAL.Reader;
@@ -221,9 +223,11 @@ public class WALSplitter {
"Splitting log file " + logfile.getPath() + "into a temporary staging area."); "Splitting log file " + logfile.getPath() + "into a temporary staging area.");
Reader logFileReader = null; Reader logFileReader = null;
this.fileBeingSplit = logfile; this.fileBeingSplit = logfile;
long startTS = EnvironmentEdgeManager.currentTime();
try { try {
long logLength = logfile.getLen(); long logLength = logfile.getLen();
LOG.info("Splitting WAL={}, length={}", logPath, logLength); LOG.info("Splitting WAL={}, size={} ({} bytes)", logPath, StringUtils.humanSize(logLength),
logLength);
status.setStatus("Opening log file"); status.setStatus("Opening log file");
if (reporter != null && !reporter.progress()) { if (reporter != null && !reporter.progress()) {
progress_failed = true; progress_failed = true;
@@ -234,6 +238,8 @@
LOG.warn("Nothing to split in WAL={}", logPath); LOG.warn("Nothing to split in WAL={}", logPath);
return true; return true;
} }
long openCost = EnvironmentEdgeManager.currentTime() - startTS;
LOG.info("Open WAL={} cost {} ms", logPath, openCost);
int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3); int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
int numOpenedFilesLastCheck = 0; int numOpenedFilesLastCheck = 0;
outputSink.setReporter(reporter); outputSink.setReporter(reporter);
@@ -241,6 +247,7 @@
outputSinkStarted = true; outputSinkStarted = true;
Entry entry; Entry entry;
Long lastFlushedSequenceId = -1L; Long lastFlushedSequenceId = -1L;
startTS = EnvironmentEdgeManager.currentTime();
while ((entry = getNextLogLine(logFileReader, logPath, skipErrors)) != null) { while ((entry = getNextLogLine(logFileReader, logPath, skipErrors)) != null) {
byte[] region = entry.getKey().getEncodedRegionName(); byte[] region = entry.getKey().getEncodedRegionName();
String encodedRegionNameAsStr = Bytes.toString(region); String encodedRegionNameAsStr = Bytes.toString(region);
@@ -324,11 +331,13 @@
progress_failed = outputSink.finishWritingAndClose() == null; progress_failed = outputSink.finishWritingAndClose() == null;
} }
} finally { } finally {
String msg = long processCost = EnvironmentEdgeManager.currentTime() - startTS;
"Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions() // See if length got updated post lease recovery
+ " regions; edits skipped=" + editsSkipped + "; log file=" + logPath + String msg = "Processed " + editsCount + " edits across " +
", length=" + logfile.getLen() + // See if length got updated post lease recovery outputSink.getNumberOfRecoveredRegions() + " regions cost " + processCost +
", corrupted=" + isCorrupted + ", progress failed=" + progress_failed; " ms; edits skipped=" + editsSkipped + "; WAL=" + logPath + ", size=" +
StringUtils.humanSize(logfile.getLen()) + ", length=" + logfile.getLen() +
", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
LOG.info(msg); LOG.info(msg);
status.markComplete(msg); status.markComplete(msg);
} }