HBASE-23037 Make the split WAL related log more readable (#632)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
parent 18ac61091d
commit 25bcc91dab
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -289,10 +290,12 @@ public class SplitLogManager {
       }
       SplitLogCounters.tot_mgr_log_split_batch_success.increment();
     }
-    String msg = "Finished splitting (more than or equal to) " + totalSize +
-        " bytes in " + ((batch == null)? 0: batch.installed) +
-        " log files in " + logDirs + " in " +
-        ((startTime == 0)? startTime: (EnvironmentEdgeManager.currentTime() - startTime)) + "ms";
+    String msg =
+      "Finished splitting (more than or equal to) " + StringUtils.humanSize(totalSize) + " (" +
+        totalSize + " bytes) in " + ((batch == null) ? 0 : batch.installed) + " log files in " +
+        logDirs + " in " +
+        ((startTime == 0) ? startTime : (EnvironmentEdgeManager.currentTime() - startTime)) +
+        "ms";
     status.markComplete(msg);
     LOG.info(msg);
     return totalSize;
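A minimal sketch of how the reworked SplitLogManager summary line reads, using made-up sizes, a made-up log directory, and a hypothetical class name; only StringUtils.humanSize is the helper the patch itself imports:

import org.apache.hadoop.hbase.procedure2.util.StringUtils;

public class SplitSummaryMessageSketch {
  public static void main(String[] args) {
    // Made-up values standing in for SplitLogManager state.
    long totalSize = 265_318_400L;   // bytes of WAL data split
    int installed = 3;               // batch.installed
    long elapsedMs = 5123;           // elapsed time derived from EnvironmentEdgeManager.currentTime()
    String logDirs = "[hdfs://nn/hbase/WALs/rs1,16020,1-splitting]";

    // Old message: raw byte count only.
    String oldMsg = "Finished splitting (more than or equal to) " + totalSize +
        " bytes in " + installed + " log files in " + logDirs + " in " + elapsedMs + "ms";

    // New message: human-readable size first, raw byte count kept in parentheses.
    String newMsg = "Finished splitting (more than or equal to) " +
        StringUtils.humanSize(totalSize) + " (" + totalSize + " bytes) in " +
        installed + " log files in " + logDirs + " in " + elapsedMs + "ms";

    System.out.println(oldMsg);
    System.out.println(newMsg); // shows the size as roughly 253 MB instead of a 9-digit byte count
  }
}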
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.wal;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -73,7 +72,7 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
     for (final Map.Entry<byte[], WALSplitter.RegionEntryBuffer> buffer : entryBuffers.buffers
         .entrySet()) {
       LOG.info("Submitting writeThenClose of {}",
-          Arrays.toString(buffer.getValue().encodedRegionName));
+          Bytes.toString(buffer.getValue().encodedRegionName));
       completionService.submit(new Callable<Void>() {
         @Override
         public Void call() throws Exception {
@@ -148,4 +147,4 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
     }
     return dst;
   }
-}
+}
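The one-character change above matters because encoded region names are hex strings stored as ASCII bytes, so Arrays.toString prints numeric byte values while Bytes.toString prints the name itself. A minimal sketch with a made-up region name and a hypothetical class name:

import java.util.Arrays;

import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameLoggingSketch {
  public static void main(String[] args) {
    // Made-up encoded region name for illustration only.
    byte[] encodedRegionName = Bytes.toBytes("d5a127167c6e2dc5106f066cc84506f8");

    // Before the patch: numeric byte values, unreadable in the log.
    System.out.println(Arrays.toString(encodedRegionName));
    // -> [100, 53, 97, 49, 50, ...]

    // After the patch: the region name itself.
    System.out.println(Bytes.toString(encodedRegionName));
    // -> d5a127167c6e2dc5106f066cc84506f8
  }
}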
@@ -44,11 +44,13 @@ import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
@@ -221,9 +223,11 @@ public class WALSplitter {
       "Splitting log file " + logfile.getPath() + "into a temporary staging area.");
     Reader logFileReader = null;
     this.fileBeingSplit = logfile;
+    long startTS = EnvironmentEdgeManager.currentTime();
     try {
       long logLength = logfile.getLen();
-      LOG.info("Splitting WAL={}, length={}", logPath, logLength);
+      LOG.info("Splitting WAL={}, size={} ({} bytes)", logPath, StringUtils.humanSize(logLength),
+        logLength);
       status.setStatus("Opening log file");
       if (reporter != null && !reporter.progress()) {
         progress_failed = true;
@@ -234,6 +238,8 @@ public class WALSplitter {
         LOG.warn("Nothing to split in WAL={}", logPath);
         return true;
       }
+      long openCost = EnvironmentEdgeManager.currentTime() - startTS;
+      LOG.info("Open WAL={} cost {} ms", logPath, openCost);
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
       int numOpenedFilesLastCheck = 0;
       outputSink.setReporter(reporter);
@@ -241,6 +247,7 @@ public class WALSplitter {
       outputSinkStarted = true;
       Entry entry;
       Long lastFlushedSequenceId = -1L;
+      startTS = EnvironmentEdgeManager.currentTime();
       while ((entry = getNextLogLine(logFileReader, logPath, skipErrors)) != null) {
         byte[] region = entry.getKey().getEncodedRegionName();
         String encodedRegionNameAsStr = Bytes.toString(region);
@@ -324,11 +331,13 @@ public class WALSplitter {
         progress_failed = outputSink.finishWritingAndClose() == null;
       }
     } finally {
-      String msg =
-        "Processed " + editsCount + " edits across " + outputSink.getNumberOfRecoveredRegions()
-          + " regions; edits skipped=" + editsSkipped + "; log file=" + logPath +
-          ", length=" + logfile.getLen() + // See if length got updated post lease recovery
-          ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
+      long processCost = EnvironmentEdgeManager.currentTime() - startTS;
+      // See if length got updated post lease recovery
+      String msg = "Processed " + editsCount + " edits across " +
+        outputSink.getNumberOfRecoveredRegions() + " regions cost " + processCost +
+        " ms; edits skipped=" + editsSkipped + "; WAL=" + logPath + ", size=" +
+        StringUtils.humanSize(logfile.getLen()) + ", length=" + logfile.getLen() +
+        ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
       LOG.info(msg);
       status.markComplete(msg);
     }
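A minimal sketch of the timing pattern the WALSplitter change introduces, with a made-up WAL path, a hypothetical class name, and sleeps standing in for the real open/replay work; only EnvironmentEdgeManager.currentTime and StringUtils.humanSize are taken from the patch:

import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SplitTimingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(SplitTimingSketch.class);

  // Stand-in for WALSplitter.splitLogFile: measure open cost and processing cost separately.
  static void splitOneWal(String logPath, long logLength) throws InterruptedException {
    long startTS = EnvironmentEdgeManager.currentTime();
    LOG.info("Splitting WAL={}, size={} ({} bytes)", logPath,
        StringUtils.humanSize(logLength), logLength);

    Thread.sleep(50); // stand-in for opening the reader
    long openCost = EnvironmentEdgeManager.currentTime() - startTS;
    LOG.info("Open WAL={} cost {} ms", logPath, openCost);

    startTS = EnvironmentEdgeManager.currentTime(); // reset before replaying edits
    Thread.sleep(200); // stand-in for reading and distributing edits
    long processCost = EnvironmentEdgeManager.currentTime() - startTS;
    LOG.info("Processed {} edits cost {} ms; WAL={}, size={}",
        1234, processCost, logPath, StringUtils.humanSize(logLength));
  }

  public static void main(String[] args) throws InterruptedException {
    splitOneWal("hdfs://example/hbase/WALs/rs1-splitting/wal.1", 64L << 20);
  }
}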