From 24d8c74a527379faba8070fc160ebc1450e947f6 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 4 Mar 2015 15:25:10 -0500
Subject: [PATCH] Core: throttle delete-by-query when merges are falling behind

Delete-by-query is incredibly costly because it forces a refresh each
time, so if you are also indexing this can cause massive segment
explosion.  This change throttles delete-by-query when merges can't
keep up.  It's likely not enough (#7052 is the long-term solution) but
can only help.

Closes #9986
---
 .../elasticsearch/index/engine/InternalEngine.java | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index d05ed4512d5..acf207f70b3 100644
--- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -440,6 +440,7 @@ public class InternalEngine extends Engine {
     public void delete(Delete delete) throws EngineException {
         try (ReleasableLock _ = readLock.acquire()) {
             ensureOpen();
+            // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
             innerDelete(delete);
             flushNeeded = true;
         } catch (OutOfMemoryError | IllegalStateException | IOException t) {
@@ -507,6 +508,19 @@ public class InternalEngine extends Engine {
     public void delete(DeleteByQuery delete) throws EngineException {
         try (ReleasableLock _ = readLock.acquire()) {
             ensureOpen();
+            if (delete.origin() == Operation.Origin.RECOVERY) {
+                // Don't throttle recovery operations
+                innerDelete(delete);
+            } else {
+                try (Releasable r = throttle.acquireThrottle()) {
+                    innerDelete(delete);
+                }
+            }
+        }
+    }
+
+    private void innerDelete(DeleteByQuery delete) throws EngineException {
+        try {
             Query query;
             if (delete.nested() && delete.aliasFilter() != null) {
                 query = new IncludeNestedDocsQuery(new FilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());
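
Appended note (not part of the patch above): a minimal, self-contained sketch of the activate/deactivate throttle pattern that the delete-by-query path now goes through. SimpleThrottle and Release are invented names for illustration only; the actual engine uses its own IndexThrottle together with Elasticsearch's Releasable/ReleasableLock types, and the real implementation differs in detail.

    // Hypothetical illustration only -- not the engine's actual IndexThrottle.
    import java.util.concurrent.locks.ReentrantLock;

    final class SimpleThrottle {

        // Non-throwing variant of AutoCloseable so try-with-resources stays tidy.
        interface Release extends AutoCloseable {
            @Override
            void close();
        }

        private static final Release NOOP = () -> {};

        private final ReentrantLock lock = new ReentrantLock();
        private volatile boolean throttled = false;

        // The merge scheduler would flip this on when merges fall behind...
        void activate() {
            throttled = true;
        }

        // ...and off again once merges have caught up.
        void deactivate() {
            throttled = false;
        }

        // Segment-producing operations wrap their work in this: while throttling
        // is active they serialize on a single lock, otherwise they pass straight
        // through. The check is best-effort; a flag flip that races with it is
        // simply picked up by the next operation.
        Release acquireThrottle() {
            if (throttled) {
                lock.lock();
                return lock::unlock;
            }
            return NOOP;
        }
    }

The design point the patch makes in its first hunk still applies here: delete-by-id never acquires the throttle because it does not create new segments, so only segment-producing operations such as delete-by-query pay the cost when merges fall behind.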