Core: throttle delete-by-query when merges are falling behind

Delete-by-query is incredibly costly because it forces a refresh on each
request, so if you are also indexing concurrently it can cause a massive
explosion in segment count.

This change throttles delete-by-query when merges can't keep up.  It's
likely not enough (#7052 is the long-term solution) but can only
help.

Closes #9986
Michael McCandless, 2015-03-04 15:25:10 -05:00 (committed by mikemccand)
parent 764901a9cd
commit 24d8c74a52
1 changed file with 14 additions and 0 deletions

@@ -440,6 +440,7 @@ public class InternalEngine extends Engine {
    public void delete(Delete delete) throws EngineException {
        try (ReleasableLock _ = readLock.acquire()) {
            ensureOpen();
            // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
            innerDelete(delete);
            flushNeeded = true;
        } catch (OutOfMemoryError | IllegalStateException | IOException t) {
@@ -507,6 +508,19 @@ public class InternalEngine extends Engine {
    public void delete(DeleteByQuery delete) throws EngineException {
        try (ReleasableLock _ = readLock.acquire()) {
            ensureOpen();
            if (delete.origin() == Operation.Origin.RECOVERY) {
                // Don't throttle recovery operations
                innerDelete(delete);
            } else {
                try (Releasable r = throttle.acquireThrottle()) {
                    innerDelete(delete);
                }
            }
        }
    }

    private void innerDelete(DeleteByQuery delete) throws EngineException {
        try {
            Query query;
            if (delete.nested() && delete.aliasFilter() != null) {
                query = new IncludeNestedDocsQuery(new FilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());