diff --git a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/SpillingGrouper.java b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/SpillingGrouper.java
index 86365fef12b..dd038acbd18 100644
--- a/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/SpillingGrouper.java
+++ b/processing/src/main/java/org/apache/druid/query/groupby/epinephelinae/SpillingGrouper.java
@@ -79,6 +79,7 @@ public class SpillingGrouper<KeyType> implements Grouper<KeyType>
   private final List<File> dictionaryFiles = new ArrayList<>();
   private final boolean sortHasNonGroupingFields;
 
+  private boolean diskFull = false;
   private boolean spillingAllowed;
 
   public SpillingGrouper(
@@ -171,6 +172,13 @@ public class SpillingGrouper<KeyType> implements Grouper<KeyType>
   @Override
   public AggregateResult aggregate(KeyType key, int keyHash)
   {
+    if (diskFull) {
+      // If the prior return was DISK_FULL, then return it again. When we return DISK_FULL to a processing thread,
+      // it skips the rest of the segment and the query is canceled. However, it's possible that the next segment
+      // starts processing before cancellation can kick in. We want that one, if it occurs, to see DISK_FULL too.
+      return DISK_FULL;
+    }
+
     final AggregateResult result = grouper.aggregate(key, keyHash);
 
     if (result.isOk() || !spillingAllowed || temporaryStorage.maxSize() <= 0) {
@@ -184,6 +192,7 @@ public class SpillingGrouper<KeyType> implements Grouper<KeyType>
         spill();
       }
       catch (TemporaryStorageFullException e) {
+        diskFull = true;
         return DISK_FULL;
       }
       catch (IOException e) {
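
For illustration, here is a minimal, self-contained sketch (not Druid code) of the "sticky" latch pattern the patch introduces: once a spill fails for lack of temporary storage, every later aggregate call keeps reporting DISK_FULL, so rows from a segment that starts processing before cancellation lands also bail out. The LatchingGrouper, Result enum, and spillBudget counter below are hypothetical stand-ins, not the real Grouper or AggregateResult API.

public class StickyDiskFullSketch
{
  // Hypothetical stand-in for Druid's AggregateResult.
  enum Result { OK, DISK_FULL }

  static class LatchingGrouper
  {
    private boolean diskFull = false;
    private int spillBudget = 2;  // pretend temporary storage can absorb two spills

    Result aggregate(String key)
    {
      if (diskFull) {
        // A prior call hit disk-full: keep returning DISK_FULL, mirroring the patch.
        return Result.DISK_FULL;
      }
      if (spillBudget == 0) {
        diskFull = true;  // latch the failure, as the patch does in its catch block
        return Result.DISK_FULL;
      }
      spillBudget--;
      return Result.OK;
    }
  }

  public static void main(String[] args)
  {
    LatchingGrouper grouper = new LatchingGrouper();
    for (String key : new String[]{"a", "b", "c", "d"}) {
      System.out.println(key + " -> " + grouper.aggregate(key));
    }
    // Prints OK, OK, DISK_FULL, DISK_FULL: the fourth call fails only because the latch is set.
  }
}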