HBASE-17085 AsyncFSWAL may issue unnecessary AsyncDFSOutput.sync
commit ab55bad899
parent 80acc2dca5
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -37,6 +37,7 @@ import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.DrainBarrier;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -163,9 +165,9 @@ public abstract class AbstractFSWAL<W> implements WAL {
   /** The barrier used to ensure that close() waits for all log rolls and flushes to finish. */
   protected final DrainBarrier closeBarrier = new DrainBarrier();

-  protected final int slowSyncNs;
+  protected final long slowSyncNs;

-  private final long walSyncTimeout;
+  private final long walSyncTimeoutNs;

   // If > than this size, roll the log.
   protected final long logrollsize;
@@ -221,12 +223,8 @@ public abstract class AbstractFSWAL<W> implements WAL {
    * WAL Comparator; it compares the timestamp (log filenum), present in the log file name. Throws
    * an IllegalArgumentException if used to compare paths from different wals.
    */
-  final Comparator<Path> LOG_NAME_COMPARATOR = new Comparator<Path>() {
-    @Override
-    public int compare(Path o1, Path o2) {
-      return Long.compare(getFileNumFromFileName(o1), getFileNumFromFileName(o2));
-    }
-  };
+  final Comparator<Path> LOG_NAME_COMPARATOR =
+    (o1, o2) -> Long.compare(getFileNumFromFileName(o1), getFileNumFromFileName(o2));

   private static final class WalProps {
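Note: the hunk above collapses an anonymous `Comparator` class into a lambda. A minimal standalone sketch of the same transformation (the class and comparator here are hypothetical stand-ins):

```java
import java.util.Comparator;

public class ComparatorLambda {
  public static void main(String[] args) {
    // Same shape as the hunk above: Comparator is a functional interface, so
    // the anonymous class collapses to a lambda. Long.compare also avoids the
    // overflow risk of a subtraction-based comparator.
    Comparator<String> byLength = (o1, o2) -> Long.compare(o1.length(), o2.length());
    System.out.println(byLength.compare("wal-1", "wal-10")); // negative: first is shorter
  }
}
```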
@@ -258,7 +256,7 @@ public abstract class AbstractFSWAL<W> implements WAL {
   /**
    * Map of {@link SyncFuture}s keyed by Handler objects. Used so we reuse SyncFutures.
    * <p>
-   * TODO: Reus FSWALEntry's rather than create them anew each time as we do SyncFutures here.
+   * TODO: Reuse FSWALEntry's rather than create them anew each time as we do SyncFutures here.
    * <p>
    * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather than have them get
    * them from this Map?
@@ -400,10 +398,10 @@ public abstract class AbstractFSWAL<W> implements WAL {
     LOG.info("WAL configuration: blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize="
         + StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", suffix="
         + walFileSuffix + ", logDir=" + this.walDir + ", archiveDir=" + this.walArchiveDir);
-    this.slowSyncNs =
-        1000000 * conf.getInt("hbase.regionserver.hlog.slowsync.ms", DEFAULT_SLOW_SYNC_TIME_MS);
-    this.walSyncTimeout = conf.getLong("hbase.regionserver.hlog.sync.timeout",
-        DEFAULT_WAL_SYNC_TIMEOUT_MS);
+    this.slowSyncNs = TimeUnit.MILLISECONDS
+        .toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", DEFAULT_SLOW_SYNC_TIME_MS));
+    this.walSyncTimeoutNs = TimeUnit.MILLISECONDS
+        .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", DEFAULT_WAL_SYNC_TIMEOUT_MS));
     int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 200);
     // Presize our map of SyncFutures by handler objects.
     this.syncFuturesByHandler = new ConcurrentHashMap<Thread, SyncFuture>(maxHandlersCount);
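Note: the conversion above trades the hand-rolled `1000000 *` multiplier for `TimeUnit.MILLISECONDS.toNanos(...)` and widens `slowSyncNs` to `long`, so a large configured millisecond value cannot overflow an `int`. A minimal sketch of the pattern, assuming a plain constructor argument in place of the Hadoop `Configuration` lookup (class name hypothetical):

```java
import java.util.concurrent.TimeUnit;

public class SlowSyncThreshold {
  // Read a millisecond setting once, store it widened to long nanoseconds,
  // then compare System.nanoTime() deltas with no per-call unit conversion.
  private final long slowSyncNs;

  SlowSyncThreshold(int slowSyncMs) {
    this.slowSyncNs = TimeUnit.MILLISECONDS.toNanos(slowSyncMs);
  }

  boolean isSlow(long startNs, long endNs) {
    return endNs - startNs > slowSyncNs;
  }

  public static void main(String[] args) {
    SlowSyncThreshold t = new SlowSyncThreshold(100); // 100 ms is an arbitrary example value
    System.out.println(t.isSlow(0, TimeUnit.MILLISECONDS.toNanos(250))); // true
  }
}
```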
@@ -682,7 +680,7 @@ public abstract class AbstractFSWAL<W> implements WAL {
   protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
     // Now we have published the ringbuffer, halt the current thread until we get an answer back.
     try {
-      syncFuture.get(walSyncTimeout);
+      syncFuture.get(walSyncTimeoutNs);
       return syncFuture.getSpan();
     } catch (TimeoutIOException tioe) {
       // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
@@ -840,15 +838,10 @@ public abstract class AbstractFSWAL<W> implements WAL {
     sequenceIdAccounting.updateStore(encodedRegionName, familyName, sequenceid, onlyIfGreater);
   }

-  protected SyncFuture getSyncFuture(final long sequence, Span span) {
-    SyncFuture syncFuture = this.syncFuturesByHandler.get(Thread.currentThread());
-    if (syncFuture == null) {
-      syncFuture = new SyncFuture(sequence, span);
-      this.syncFuturesByHandler.put(Thread.currentThread(), syncFuture);
-    } else {
-      syncFuture.reset(sequence, span);
-    }
-    return syncFuture;
+  protected SyncFuture getSyncFuture(long sequence, Span span) {
+    return CollectionUtils
+        .computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), SyncFuture::new)
+        .reset(sequence, span);
   }

   protected void requestLogRoll(boolean tooFewReplicas) {
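Note: the rewritten `getSyncFuture` relies on two companion changes visible later in this commit: the SyncFuture.java hunks delete the `(txid, span)` constructor so `SyncFuture::new` can serve as a supplier, and `reset(...)` evidently returns the future itself for chaining (see the test's `new SyncFuture().reset(txid, null)`). A sketch of what a helper like `CollectionUtils.computeIfAbsent` plausibly does, assuming get-then-putIfAbsent semantics (helper and class names hypothetical):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

public final class ComputeIfAbsentSketch {
  // A plain get() serves the common hit path without locking; only a miss
  // pays for the atomic putIfAbsent, and a lost race returns the winner.
  static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Supplier<V> supplier) {
    V value = map.get(key);
    if (value != null) {
      return value;
    }
    V created = supplier.get();
    V raced = map.putIfAbsent(key, created);
    return raced != null ? raced : created;
  }

  public static void main(String[] args) {
    ConcurrentMap<Thread, StringBuilder> perThread = new ConcurrentHashMap<>();
    StringBuilder sb = computeIfAbsent(perThread, Thread.currentThread(), StringBuilder::new);
    sb.append("reused per-thread instance");
    System.out.println(sb);
  }
}
```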
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -35,8 +35,9 @@ import java.util.Comparator;
 import java.util.Deque;
 import java.util.Iterator;
 import java.util.List;
-import java.util.PriorityQueue;
 import java.util.Queue;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -109,8 +110,8 @@ import org.apache.htrace.TraceScope;
 * {@link #readyForRolling} to false, and then wait on {@link #readyForRolling}(see
 * {@link #waitForSafePoint()}).</li>
 * <li>In the consumer thread, we will stop polling entries from {@link #waitingConsumePayloads} if
-* {@link #waitingRoll} is true, and also stop writing the entries in {@link #toWriteAppends}
-* out.</li>
+* {@link #waitingRoll} is true, and also stop writing the entries in {@link #toWriteAppends} out.
+* </li>
 * <li>If there are unflush data in the writer, sync them.</li>
 * <li>When all out-going sync request is finished, i.e, the {@link #unackedAppends} is empty,
 * signal the {@link #readyForRollingCond}.</li>
@@ -179,8 +180,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {

   private final Deque<FSWALEntry> unackedAppends = new ArrayDeque<>();

-  private final PriorityQueue<SyncFuture> syncFutures =
-      new PriorityQueue<SyncFuture>(11, SEQ_COMPARATOR);
+  private final SortedSet<SyncFuture> syncFutures = new TreeSet<SyncFuture>(SEQ_COMPARATOR);

   // the highest txid of WAL entries being processed
   private long highestProcessedAppendTxid;
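Note: swapping `PriorityQueue` for a `TreeSet` keeps the futures ordered by `SEQ_COMPARATOR` while also exposing `last()`, which the new guard further down uses to read the newest pending sync txid; a heap only surfaces the minimum cheaply. One caveat of the design choice: a `TreeSet` treats comparator ties as duplicates, so `SEQ_COMPARATOR` presumably breaks ties between distinct futures. A small illustration with bare txids standing in for `SyncFuture`s:

```java
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetOverHeap {
  public static void main(String[] args) {
    // Txids standing in for SyncFutures ordered by SEQ_COMPARATOR.
    SortedSet<Long> pending = new TreeSet<>();
    pending.add(7L);
    pending.add(3L);
    pending.add(9L);
    // Ordered iteration still visits the smallest first (as the old peek()
    // loop did), and last() now also answers "what is the newest request?".
    System.out.println(pending.first()); // 3
    System.out.println(pending.last());  // 9
  }
}
```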
@@ -188,6 +188,8 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   // file length when we issue last sync request on the writer
   private long fileLengthAtLastSync;

+  private long highestProcessedAppendTxidAtLastSync;
+
   public AsyncFSWAL(FileSystem fs, Path rootDir, String logDir, String archiveDir,
       Configuration conf, List<WALActionsListener> listeners, boolean failIfWALExists,
       String prefix, String suffix, EventLoop eventLoop)
@@ -286,7 +288,6 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {

   private void syncCompleted(AsyncWriter writer, long processedTxid, long startTimeNs) {
     highestSyncedTxid.set(processedTxid);
-    int syncCount = finishSync(true);
     for (Iterator<FSWALEntry> iter = unackedAppends.iterator(); iter.hasNext();) {
       if (iter.next().getTxid() <= processedTxid) {
         iter.remove();
@@ -294,7 +295,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
         break;
       }
     }
-    postSync(System.nanoTime() - startTimeNs, syncCount);
+    postSync(System.nanoTime() - startTimeNs, finishSync(true));
     // Ideally, we should set a flag to indicate that the log roll has already been requested for
     // the current writer and give up here, and reset the flag when roll is finished. But we
     // finish roll in the log roller thread so the flag need to be set by different thread which
@@ -321,14 +322,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     }
   }

-  private void sync(final AsyncWriter writer, final long processedTxid) {
+  private void sync(AsyncWriter writer) {
     fileLengthAtLastSync = writer.getLength();
+    long currentHighestProcessedAppendTxid = highestProcessedAppendTxid;
+    highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid;
     final long startTimeNs = System.nanoTime();
     writer.sync().whenComplete((result, error) -> {
       if (error != null) {
         syncFailed(error);
       } else {
-        syncCompleted(writer, processedTxid, startTimeNs);
+        syncCompleted(writer, currentHighestProcessedAppendTxid, startTimeNs);
       }
     });
   }
@@ -341,10 +344,11 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {

   private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
     int finished = 0;
-    for (SyncFuture sync; (sync = syncFutures.peek()) != null;) {
+    for (Iterator<SyncFuture> iter = syncFutures.iterator(); iter.hasNext();) {
+      SyncFuture sync = iter.next();
       if (sync.getTxid() <= txid) {
         sync.done(txid, null);
-        syncFutures.remove();
+        iter.remove();
         finished++;
         if (addSyncTrace) {
           addTimeAnnotation(sync, "writer synced");
@@ -356,12 +360,13 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     return finished;
   }

+  // try advancing the highestSyncedTxid as much as possible
   private int finishSync(boolean addSyncTrace) {
-    long doneTxid = highestSyncedTxid.get();
-    if (doneTxid >= highestProcessedAppendTxid) {
+    if (unackedAppends.isEmpty()) {
+      // All outstanding appends have been acked.
       if (toWriteAppends.isEmpty()) {
-        // all outstanding appends have been acked, just finish all syncs.
-        long maxSyncTxid = doneTxid;
+        // Also no appends that wait to be written out, then just finished all pending syncs.
+        long maxSyncTxid = highestSyncedTxid.get();
         for (SyncFuture sync : syncFutures) {
           maxSyncTxid = Math.max(maxSyncTxid, sync.getTxid());
           sync.done(maxSyncTxid, null);
@@ -379,10 +384,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
         // highestProcessedAppendTxid and lowestUnprocessedAppendTxid can be finished.
         long lowestUnprocessedAppendTxid = toWriteAppends.peek().getTxid();
         assert lowestUnprocessedAppendTxid > highestProcessedAppendTxid;
-        highestSyncedTxid.set(lowestUnprocessedAppendTxid - 1);
-        return finishSyncLowerThanTxid(lowestUnprocessedAppendTxid - 1, addSyncTrace);
+        long doneTxid = lowestUnprocessedAppendTxid - 1;
+        highestSyncedTxid.set(doneTxid);
+        return finishSyncLowerThanTxid(doneTxid, addSyncTrace);
       }
     } else {
+      // There are still unacked appends. So let's move the highestSyncedTxid to the txid of the
+      // first unacked append minus 1.
+      long lowestUnackedAppendTxid = unackedAppends.peek().getTxid();
+      long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid.get());
+      highestSyncedTxid.set(doneTxid);
       return finishSyncLowerThanTxid(doneTxid, addSyncTrace);
     }
   }
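Note: a worked example of the new else-branch arithmetic in `finishSync`, with hypothetical txids:

```java
public class FinishSyncArithmetic {
  public static void main(String[] args) {
    // The first unacked append is txid 10, so txids up to 9 are known durable;
    // max() keeps a previously recorded higher watermark from moving backwards.
    long lowestUnackedAppendTxid = 10;
    long highestSyncedTxid = 7;
    long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid);
    // doneTxid == 9: sync futures with txid <= 9 complete; txid >= 10 keeps waiting.
    System.out.println(doneTxid);
  }
}
```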
@@ -392,7 +403,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     // maybe a sync request is not queued when we issue a sync, so check here to see if we could
     // finish some.
     finishSync(false);
-    long newHighestProcessedTxid = -1L;
+    long newHighestProcessedAppendTxid = -1L;
     for (Iterator<FSWALEntry> iter = toWriteAppends.iterator(); iter.hasNext();) {
       FSWALEntry entry = iter.next();
       boolean appended;
@@ -415,7 +426,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
           throw new AssertionError("should not happen", e);
         }
       }
-      newHighestProcessedTxid = entry.getTxid();
+      newHighestProcessedAppendTxid = entry.getTxid();
       iter.remove();
       if (appended) {
         unackedAppends.addLast(entry);
@@ -426,42 +437,32 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     }
     // if we have a newer transaction id, update it.
     // otherwise, use the previous transaction id.
-    if (newHighestProcessedTxid > 0) {
-      highestProcessedAppendTxid = newHighestProcessedTxid;
+    if (newHighestProcessedAppendTxid > 0) {
+      highestProcessedAppendTxid = newHighestProcessedAppendTxid;
     } else {
-      newHighestProcessedTxid = highestProcessedAppendTxid;
+      newHighestProcessedAppendTxid = highestProcessedAppendTxid;
     }

     if (writer.getLength() - fileLengthAtLastSync >= batchSize) {
       // sync because buffer size limit.
-      sync(writer, newHighestProcessedTxid);
+      sync(writer);
       return;
     }
     if (writer.getLength() == fileLengthAtLastSync) {
       // we haven't written anything out, just advance the highestSyncedSequence since we may only
       // stamped some region sequence id.
-      highestSyncedTxid.set(newHighestProcessedTxid);
-      finishSync(false);
-      trySetReadyForRolling();
+      if (unackedAppends.isEmpty()) {
+        highestSyncedTxid.set(highestProcessedAppendTxid);
+        finishSync(false);
+        trySetReadyForRolling();
+      }
       return;
     }
     // we have some unsynced data but haven't reached the batch size yet
-    if (!syncFutures.isEmpty()) {
+    if (!syncFutures.isEmpty()
+        && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) {
       // we have at least one sync request
-      sync(writer, newHighestProcessedTxid);
-      return;
-    }
-    // usually waitingRoll is false so we check it without lock first.
-    if (waitingRoll) {
-      consumeLock.lock();
-      try {
-        if (waitingRoll) {
-          // there is a roll request
-          sync(writer, newHighestProcessedTxid);
-        }
-      } finally {
-        consumeLock.unlock();
-      }
+      sync(writer);
     }
   }
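Note: this hunk is the core of HBASE-17085. Previously any non-empty `syncFutures` set triggered another `AsyncDFSOutput.sync`; now a sync is issued only when some pending future waits for data newer than `highestProcessedAppendTxidAtLastSync`, i.e. newer than what the already in-flight sync will cover. A reduced sketch of the guard (class and field names hypothetical, futures reduced to their txids):

```java
import java.util.SortedSet;
import java.util.TreeSet;

public class UnnecessarySyncGuard {
  private final SortedSet<Long> syncFutureTxids = new TreeSet<>();
  private long highestProcessedAppendTxidAtLastSync;

  // A sync is worth issuing only if the newest pending future asks for data
  // beyond the watermark recorded when the last sync was issued.
  boolean shouldIssueSync() {
    return !syncFutureTxids.isEmpty()
        && syncFutureTxids.last() > highestProcessedAppendTxidAtLastSync;
  }

  public static void main(String[] args) {
    UnnecessarySyncGuard g = new UnnecessarySyncGuard();
    g.highestProcessedAppendTxidAtLastSync = 20;
    g.syncFutureTxids.add(18L); // already covered by the in-flight sync
    System.out.println(g.shouldIssueSync()); // false -> skip the redundant sync
    g.syncFutureTxids.add(25L); // newer data really does need another sync
    System.out.println(g.shouldIssueSync()); // true
  }
}
```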
@@ -474,7 +475,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     if (waitingRoll) {
       if (writer.getLength() > fileLengthAtLastSync) {
         // issue a sync
-        sync(writer, highestProcessedAppendTxid);
+        sync(writer);
       } else {
         if (unackedAppends.isEmpty()) {
           readyForRolling = true;
@@ -673,6 +674,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       this.fsOut = ((AsyncProtobufLogWriter) nextWriter).getOutput();
     }
     this.fileLengthAtLastSync = 0L;
+    this.highestProcessedAppendTxidAtLastSync = 0L;
     consumeLock.lock();
     try {
       consumerScheduled.set(true);
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hbase.regionserver.wal;

 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Span;

 /**
@@ -73,13 +73,6 @@ class SyncFuture {
    */
   private Span span;

-  SyncFuture(long txid, Span span) {
-    this.t = Thread.currentThread();
-    this.txid = txid;
-    this.span = span;
-    this.doneTxid = NOT_DONE;
-  }
-
   /**
    * Call this method to clear old usage and get it ready for new deploy.
    * @param txid the new transaction id
@@ -157,15 +150,15 @@ class SyncFuture {
     throw new UnsupportedOperationException();
   }

-  synchronized long get(long timeout) throws InterruptedException,
+  synchronized long get(long timeoutNs) throws InterruptedException,
       ExecutionException, TimeoutIOException {
-    final long done = EnvironmentEdgeManager.currentTime() + timeout;
+    final long done = System.nanoTime() + timeoutNs;
     while (!isDone()) {
       wait(1000);
-      if (EnvironmentEdgeManager.currentTime() >= done) {
-        throw new TimeoutIOException("Failed to get sync result after "
-            + timeout + " ms for txid=" + this.txid
-            + ", WAL system stuck?");
+      if (System.nanoTime() >= done) {
+        throw new TimeoutIOException(
+            "Failed to get sync result after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
+                + " ms for txid=" + this.txid + ", WAL system stuck?");
       }
     }
     if (this.throwable != null) {
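Note: `SyncFuture.get` now takes a nanosecond timeout and computes its deadline from `System.nanoTime()`, which is monotonic, instead of `EnvironmentEdgeManager.currentTime()`, which tracks the wall clock and can jump under clock adjustment. A minimal sketch of the deadline loop under those assumptions (helper and exception are hypothetical):

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class MonotonicDeadline {
  // A deadline derived from System.nanoTime() cannot misfire when the wall
  // clock is adjusted; only real elapsed time counts against the timeout.
  static void awaitDone(Object monitor, BooleanSupplier done, long timeoutNs)
      throws InterruptedException {
    long deadline = System.nanoTime() + timeoutNs;
    synchronized (monitor) {
      while (!done.getAsBoolean()) {
        if (System.nanoTime() >= deadline) {
          throw new IllegalStateException(
              "timed out after " + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + " ms");
        }
        monitor.wait(1000); // re-check at least once a second, as SyncFuture.get does
      }
    }
  }
}
```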
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSyncFuture.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.regionserver.wal;

 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;

 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -29,21 +28,14 @@ import org.junit.experimental.categories.Category;
 @Category({ RegionServerTests.class, SmallTests.class })
 public class TestSyncFuture {

-  @Test(timeout = 60000)
+  @Test(expected = TimeoutIOException.class)
   public void testGet() throws Exception {
     long timeout = 5000;
     long txid = 100000;
-    SyncFuture syncFulture = new SyncFuture(txid, null);
+    SyncFuture syncFulture = new SyncFuture().reset(txid, null);
     syncFulture.done(txid, null);
     assertEquals(txid, syncFulture.get(timeout));

-    syncFulture.reset(txid, null);
-    try {
-      syncFulture.get(timeout);
-      fail("Should have timed out but not");
-    } catch (TimeoutIOException e) {
-      // test passed
-    }
+    syncFulture.reset(txid, null).get(timeout);
   }
 }
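Note: the test now declares the expected timeout through JUnit 4's `expected` attribute instead of the try/`fail`/catch idiom it removes. A minimal standalone illustration of the same pattern (class and exception here are placeholders, not the HBase types):

```java
import org.junit.Test;

public class ExpectedExceptionStyle {
  // The test passes only if the declared exception escapes the method body,
  // which is exactly what the removed try/fail/catch block asserted by hand.
  @Test(expected = IllegalStateException.class)
  public void timesOut() {
    throw new IllegalStateException("simulated timeout");
  }
}
```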