From ec1c3559da121b410d6599a9b5bb99a78bbabcdf Mon Sep 17 00:00:00 2001
From: Sam Saffron
Date: Tue, 9 Apr 2019 22:24:19 +1000
Subject: [PATCH] PERF: correct clean up inactive users job so it does not clog
 the scheduler

Add a hard limit of 1000 users per job run so we do not clog the
scheduler.

destroyer.destroy runs inside a transaction, and this can have serious
complications with the open record set that find_each keeps going.
---
 app/jobs/scheduled/clean_up_inactive_users.rb | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/app/jobs/scheduled/clean_up_inactive_users.rb b/app/jobs/scheduled/clean_up_inactive_users.rb
index 99579dbab9c..d79562f2a22 100644
--- a/app/jobs/scheduled/clean_up_inactive_users.rb
+++ b/app/jobs/scheduled/clean_up_inactive_users.rb
@@ -14,9 +14,10 @@ module Jobs
         "posts.user_id IS NULL AND users.last_seen_at < ?",
         SiteSetting.clean_up_inactive_users_after_days.days.ago
       )
-      .find_each do |user|
-
+      .limit(1000)
+      .pluck(:id).each do |id|
         begin
+          user = User.find(id)
           destroyer.destroy(user, context: I18n.t("user.destroy_reasons.inactive_user"))
         rescue => e
           Discourse.handle_job_exception(e,