HBASE-26552 Introduce retry to logroller to avoid abort (#4038)

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Author: Xiaolin Ha
Date: 2022-03-07 12:20:01 +08:00 (committed by GitHub)
parent f3a48d1910
commit 591f7819f3
1 changed file with 49 additions and 10 deletions

AbstractWALRoller.java

@@ -60,6 +60,18 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   protected static final String WAL_ROLL_PERIOD_KEY = "hbase.regionserver.logroll.period";
+  /**
+   * Configure for the timeout of log rolling retry.
+   */
+  protected static final String WAL_ROLL_WAIT_TIMEOUT = "hbase.regionserver.logroll.wait.timeout.ms";
+  /**
+   * Configure for the max count of log rolling retry. The real retry count is also limited by
+   * the timeout of log rolling via {@link #WAL_ROLL_WAIT_TIMEOUT}.
+   */
+  protected static final String WAL_ROLL_RETRIES = "hbase.regionserver.logroll.retries";
   protected final ConcurrentMap<WAL, RollController> wals = new ConcurrentHashMap<>();
   protected final T abortable;
   // Period to roll log.
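The two keys above are how an operator opts in to roll retries. A minimal sketch of enabling them programmatically, assuming the standard HBase client Configuration API (the 5-retry and 60s values are made up for illustration; setting the same keys in hbase-site.xml works equally well):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative values only: allow up to 5 retries per roll, but never
    // spend more than 60s in total on one roll before giving up.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.logroll.retries", 5);
    conf.setLong("hbase.regionserver.logroll.wait.timeout.ms", 60000);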
@@ -67,6 +79,10 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   private final int threadWakeFrequency;
   // The interval to check low replication on hlog's pipeline
   private final long checkLowReplicationInterval;
+  // Wait period for roll log
+  private final long rollWaitTimeout;
+  // Max retry for roll log
+  private final int maxRollRetry;
   private volatile boolean running = true;
@@ -114,6 +130,9 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
     this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
     this.checkLowReplicationInterval =
       conf.getLong("hbase.regionserver.hlog.check.lowreplication.interval", 30 * 1000);
+    this.rollWaitTimeout = conf.getLong(WAL_ROLL_WAIT_TIMEOUT, 30000);
+    // retry rolling does not have to be the default behavior, so the default value is 0 here
+    this.maxRollRetry = conf.getInt(WAL_ROLL_RETRIES, 0);
   }

   /**
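Note the defaults: a 30s wait timeout but zero retries, so clusters that leave hbase.regionserver.logroll.retries unset keep the old single-attempt behavior. Because the retry loop below sleeps 1s between attempts, the timeout also caps the attempt count. A rough upper bound, not HBase code, assuming that fixed 1s sleep and a fast-failing roll:

    // One initial attempt plus retries, capped both by the configured count
    // and by how many one-second sleeps fit into the wait timeout.
    long rollWaitTimeout = 30000; // hbase.regionserver.logroll.wait.timeout.ms default
    int maxRollRetry = 0;         // hbase.regionserver.logroll.retries default
    long maxAttempts = 1 + Math.min(maxRollRetry, rollWaitTimeout / 1000); // = 1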
@@ -184,19 +203,39 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
         } else {
           continue;
         }
+        Map<byte[], List<byte[]>> regionsToFlush = null;
+        int nAttempts = 0;
+        long startWaiting = EnvironmentEdgeManager.currentTime();
+        do {
           try {
             // Force the roll if the logroll.period is elapsed or if a roll was requested.
             // The returned value is a collection of actual region and family names.
-            Map<byte[], List<byte[]>> regionsToFlush = controller.rollWal(now);
+            regionsToFlush = controller.rollWal(EnvironmentEdgeManager.currentTime());
+            break;
+          } catch (IOException ioe) {
+            if (ioe instanceof WALClosedException) {
+              LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", ioe);
+              iter.remove();
+              break;
+            }
+            long waitingTime = EnvironmentEdgeManager.currentTime() - startWaiting;
+            if (waitingTime < rollWaitTimeout && nAttempts < maxRollRetry) {
+              nAttempts++;
+              LOG.warn("Retry to roll log, nAttempts={}, waiting time={}ms, sleeping 1s to retry,"
+                + " last exception= {}", nAttempts, waitingTime,
+                ioe.getCause().getClass().getSimpleName());
+              sleep(1000);
+            } else {
+              LOG.error("Roll wal failed and waiting timeout, will not retry", ioe);
+              throw ioe;
+            }
+          }
+        } while (EnvironmentEdgeManager.currentTime() - startWaiting < rollWaitTimeout);
         if (regionsToFlush != null) {
           for (Map.Entry<byte[], List<byte[]>> r : regionsToFlush.entrySet()) {
             scheduleFlush(Bytes.toString(r.getKey()), r.getValue());
           }
         }
-      } catch (WALClosedException e) {
-        LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e);
-        iter.remove();
-      }
       }
     } catch (FailedLogCloseException | ConnectException e) {
       abort("Failed log close in log roller", e);