diff --git a/hapi-deployable-pom/pom.xml b/hapi-deployable-pom/pom.xml
index 83b6cb38520..f3f866d46a1 100644
--- a/hapi-deployable-pom/pom.xml
+++ b/hapi-deployable-pom/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-fhir
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../pom.xml
diff --git a/hapi-fhir-android/pom.xml b/hapi-fhir-android/pom.xml
index 87706cf277d..2dd9942ba2a 100644
--- a/hapi-fhir-android/pom.xml
+++ b/hapi-fhir-android/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-base/pom.xml b/hapi-fhir-base/pom.xml
index ed0a9fc6da4..188e9bcbe27 100644
--- a/hapi-fhir-base/pom.xml
+++ b/hapi-fhir-base/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-base/src/main/java/ca/uhn/fhir/model/api/PagingIterator.java b/hapi-fhir-base/src/main/java/ca/uhn/fhir/model/api/PagingIterator.java
index eeb9dd233da..4aeb07531f4 100644
--- a/hapi-fhir-base/src/main/java/ca/uhn/fhir/model/api/PagingIterator.java
+++ b/hapi-fhir-base/src/main/java/ca/uhn/fhir/model/api/PagingIterator.java
@@ -26,13 +26,16 @@ import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.function.Consumer;
+/**
+ * This paging iterator only works with already ordered queries
+ */
public class PagingIterator implements Iterator {
public interface PageFetcher {
void fetchNextPage(int thePageIndex, int theBatchSize, Consumer theConsumer);
}
- static final int PAGE_SIZE = 100;
+ static final int DEFAULT_PAGE_SIZE = 100;
private int myPage;
@@ -42,8 +45,16 @@ public class PagingIterator implements Iterator {
private final PageFetcher myFetcher;
+ private final int myPageSize;
+
public PagingIterator(PageFetcher theFetcher) {
+ this(DEFAULT_PAGE_SIZE, theFetcher);
+ }
+
+ public PagingIterator(int thePageSize, PageFetcher theFetcher) {
+ assert thePageSize > 0 : "Page size must be a positive value";
myFetcher = theFetcher;
+ myPageSize = thePageSize;
}
@Override
@@ -66,9 +77,9 @@ public class PagingIterator implements Iterator {
private void fetchNextBatch() {
if (!myIsFinished && myCurrentBatch.isEmpty()) {
- myFetcher.fetchNextPage(myPage, PAGE_SIZE, myCurrentBatch::add);
+ myFetcher.fetchNextPage(myPage, myPageSize, myCurrentBatch::add);
myPage++;
- myIsFinished = myCurrentBatch.size() < PAGE_SIZE;
+ myIsFinished = myCurrentBatch.size() < myPageSize;
}
}
}
diff --git a/hapi-fhir-base/src/test/java/ca/uhn/fhir/model/api/PagingIteratorTest.java b/hapi-fhir-base/src/test/java/ca/uhn/fhir/model/api/PagingIteratorTest.java
index 340d7464684..6cd51910a1f 100644
--- a/hapi-fhir-base/src/test/java/ca/uhn/fhir/model/api/PagingIteratorTest.java
+++ b/hapi-fhir-base/src/test/java/ca/uhn/fhir/model/api/PagingIteratorTest.java
@@ -62,7 +62,7 @@ public class PagingIteratorTest {
public void next_fetchTest_fetchesAndReturns() {
// 3 cases to make sure we get the edge cases
for (int adj : new int[] { -1, 0, 1 }) {
- int size = PagingIterator.PAGE_SIZE + adj;
+ int size = PagingIterator.DEFAULT_PAGE_SIZE + adj;
myPagingIterator = createPagingIterator(size);
diff --git a/hapi-fhir-bom/pom.xml b/hapi-fhir-bom/pom.xml
index f05fea85c67..11be6c4e547 100644
--- a/hapi-fhir-bom/pom.xml
+++ b/hapi-fhir-bom/pom.xml
@@ -4,7 +4,7 @@
4.0.0
ca.uhn.hapi.fhir
hapi-fhir-bom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
pom
HAPI FHIR BOM
@@ -12,7 +12,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-checkstyle/pom.xml b/hapi-fhir-checkstyle/pom.xml
index 69c0460cccf..edeebb36033 100644
--- a/hapi-fhir-checkstyle/pom.xml
+++ b/hapi-fhir-checkstyle/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-fhir
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../pom.xml
diff --git a/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml b/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
index ee43373d473..a6301421773 100644
--- a/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
+++ b/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml b/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
index ec3ae13b3f0..5a80f6e818b 100644
--- a/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
+++ b/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-fhir-cli
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../pom.xml
diff --git a/hapi-fhir-cli/pom.xml b/hapi-fhir-cli/pom.xml
index f7053e7e873..f3ad5743264 100644
--- a/hapi-fhir-cli/pom.xml
+++ b/hapi-fhir-cli/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-fhir
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../pom.xml
diff --git a/hapi-fhir-client-okhttp/pom.xml b/hapi-fhir-client-okhttp/pom.xml
index 35617f0402f..4038b5c527e 100644
--- a/hapi-fhir-client-okhttp/pom.xml
+++ b/hapi-fhir-client-okhttp/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-client/pom.xml b/hapi-fhir-client/pom.xml
index e974a566096..a13e2b42330 100644
--- a/hapi-fhir-client/pom.xml
+++ b/hapi-fhir-client/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-converter/pom.xml b/hapi-fhir-converter/pom.xml
index a14196db410..62951f59897 100644
--- a/hapi-fhir-converter/pom.xml
+++ b/hapi-fhir-converter/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-dist/pom.xml b/hapi-fhir-dist/pom.xml
index 3dda194c646..2e78389b1ec 100644
--- a/hapi-fhir-dist/pom.xml
+++ b/hapi-fhir-dist/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-fhir
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../pom.xml
diff --git a/hapi-fhir-docs/pom.xml b/hapi-fhir-docs/pom.xml
index 41acfef29b0..b60a2d29aa1 100644
--- a/hapi-fhir-docs/pom.xml
+++ b/hapi-fhir-docs/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5745-added-ready-state-to-batch2-work-chunks.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5745-added-ready-state-to-batch2-work-chunks.yaml
new file mode 100644
index 00000000000..e293b0a5f43
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5745-added-ready-state-to-batch2-work-chunks.yaml
@@ -0,0 +1,10 @@
+---
+type: add
+issue: 5745
+title: "Added another state to the Batch2 work chunk state machine: `READY`.
+ This work chunk state will be the initial state on creation.
+ Once queued for delivery, they will transition to `QUEUED`.
+ The exception is for ReductionStep chunks (because reduction steps
+ are not read off of the queue, but executed by the maintenance job
+  inline).
+"
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5767-add-poll-waiting-step-to-batch-jobs.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5767-add-poll-waiting-step-to-batch-jobs.yaml
new file mode 100644
index 00000000000..1d14dee8c60
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5767-add-poll-waiting-step-to-batch-jobs.yaml
@@ -0,0 +1,9 @@
+---
+type: add
+issue: 5767
+title: "Added new `POLL_WAITING` state for WorkChunks in batch jobs.
+ Also added RetryChunkLaterException for jobs that have steps that
+  need to be retried at a later time (a poll delay can optionally be provided to the exception).
+ If a step throws this new exception, it will be set with the new
+ `POLL_WAITING` status and retried at a later time.
+"
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5818-update-batch2-framework-with-gate_waiting-state.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5818-update-batch2-framework-with-gate_waiting-state.yaml
new file mode 100644
index 00000000000..197f01bf9f5
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_2_0/5818-update-batch2-framework-with-gate_waiting-state.yaml
@@ -0,0 +1,7 @@
+---
+type: add
+issue: 5818
+title: "Added another state to the Batch2 work chunk state machine: `GATE_WAITING`.
+ This work chunk state will be the initial state on creation for gated jobs.
+ Once all chunks are completed for the previous step, they will transition to `READY`.
+"
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
index b11e7187ce5..e085a33f7e0 100644
--- a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
@@ -47,24 +47,35 @@ stateDiagram-v2
title: Batch2 Job Work Chunk state transitions
---
stateDiagram-v2
+ state GATE_WAITING
+ state READY
+ state REDUCTION_READY
state QUEUED
state on_receive <>
state IN_PROGRESS
state ERROR
+ state POLL_WAITING
state execute <>
state FAILED
state COMPLETED
direction LR
- [*] --> QUEUED : on create
+ [*] --> READY : on create - normal or gated jobs first chunks
+ [*] --> GATE_WAITING : on create - gated jobs for all but the first chunks of the first step
+ GATE_WAITING --> READY : on gate release - gated
+ GATE_WAITING --> REDUCTION_READY : on gate release for the final reduction step (all reduction jobs are gated)
+ QUEUED --> READY : on gate release - gated (for compatibility with legacy QUEUED state up to Hapi-fhir version 7.1)
+ READY --> QUEUED : placed on kafka (maint.)
+   POLL_WAITING --> READY    : after the poll delay for a POLL_WAITING work chunk has elapsed
%% worker processing states
- QUEUED --> on_receive : on deque by worker
+ QUEUED --> on_receive : on deque by worker
on_receive --> IN_PROGRESS : start execution
IN_PROGRESS --> execute: execute
execute --> ERROR : on re-triable error
execute --> COMPLETED : success\n maybe trigger instance first_step_finished
execute --> FAILED : on unrecoverable \n or too many errors
+   execute --> POLL_WAITING : job step has thrown a RetryChunkLaterException and must be tried again after the provided poll delay
%% temporary error state until retry
ERROR --> on_receive : exception rollback\n triggers redelivery
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/introduction.md b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/introduction.md
index ce8dbc4a1f0..1c3bb485c21 100644
--- a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/introduction.md
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/introduction.md
@@ -19,36 +19,54 @@ A HAPI-FHIR batch job definition consists of a job name, version, parameter json
After a job has been defined, *instances* of that job can be submitted for batch processing by populating a `JobInstanceStartRequest` with the job name and job parameters json and then submitting that request to the Batch Job Coordinator.
The Batch Job Coordinator will then store two records in the database:
-- Job Instance with status QUEUED: that is the parent record for all data concerning this job
-- Batch Work Chunk with status QUEUED: this describes the first "chunk" of work required for this job. The first Batch Work Chunk contains no data.
+- Job Instance with status `QUEUED`: that is the parent record for all data concerning this job
+- Batch Work Chunk with status `READY`: this describes the first "chunk" of work required for this job. The first Batch Work Chunk contains no data.
-Lastly the Batch Job Coordinator publishes a message to the Batch Notification Message Channel (named `batch2-work-notification`) to inform worker threads that this first chunk of work is now ready for processing.
+### The Maintenance Job
-### Job Processing - First Step
+A Scheduled Job runs periodically (once a minute). For each Job Instance in the database, it:
-HAPI-FHIR Batch Jobs run based on job notification messages. The process is kicked off by the first chunk of work. When this notification message arrives, the message handler makes a single call to the first step defined in the job definition, passing in the job parameters as input.
+1. Calculates job progress (% of work chunks in `COMPLETE` status). If the job is finished, purges any leftover work chunks still in the database.
+1. Moves all `POLL_WAITING` work chunks to `READY` if their `nextPollTime` has expired.
+1. Calculates job progress (% of work chunks in `COMPLETE` status). If the job is finished, purges any leftover work chunks still in the database.
+1. Cleans up any complete, failed, or cancelled jobs that need to be removed.
+1. When the current step is complete, moves any gated jobs onto their next step and updates all chunks in `GATE_WAITING` to `READY`. If the job is being moved to its final reduction step, chunks are moved from `GATE_WAITING` to `REDUCTION_READY`.
+1. If the final step of a gated job is a reduction step, a reduction step execution will be triggered. All work chunks for the job in `REDUCTION_READY` will be consumed at this point.
+1. Moves all `READY` work chunks into the `QUEUED` state and publishes a message to the Batch Notification Message Channel to inform worker threads that a work chunk is now ready for processing. \*
-The handler then does the following:
-1. Change the work chunk status from QUEUED to IN_PROGRESS
-2. Change the Job Instance status from QUEUED to IN_PROGRESS
-3. If the Job Instance is cancelled, change the status to CANCELLED and abort processing.
-4. The first step of the job definition is executed with the job parameters
-5. This step creates new work chunks. For each work chunk it creates, it json serializes the work chunk data, stores it in the database, and publishes a new message to the Batch Notification Message Channel to notify worker threads that there are new work chunks waiting to be processed.
-6. If the step succeeded, the work chunk status is changed from IN_PROGRESS to COMPLETED, and the data it contained is deleted.
-7. If the step failed, the work chunk status is changed from IN_PROGRESS to either ERRORED or FAILED depending on the severity of the error.
+\* An exception is for the final reduction step, where work chunks are not published to the Batch Notification Message Channel,
+but instead processed inline.
-### Job Processing - Middle steps
+### Batch Notification Message Handler
-Middle Steps in the job definition are executed in the same way, except instead of only using the Job Parameters as input, they use both the Job Parameters and the Work Chunk data produced from the previous step.
+HAPI-FHIR Batch Jobs run based on job notification messages of the Batch Notification Message Channel (named `batch2-work-notification`).
-### Job Processing - Final Step
+When a notification message arrives, the handler does the following:
+
+1. Change the work chunk status from `QUEUED` to `IN_PROGRESS`
+1. Change the Job Instance status from `QUEUED` to `IN_PROGRESS`
+1. If the Job Instance is cancelled, change the status to `CANCELLED` and abort processing
+1. If the step creates new work chunks, each work chunk will be created in either the `GATE_WAITING` state (for gated jobs) or `READY` state (for non-gated jobs) and will be handled in the next maintenance job pass.
+1. If the step succeeds, the work chunk status is changed from `IN_PROGRESS` to `COMPLETED`, and the data it contained is deleted.
+1. If the step throws a `RetryChunkLaterException`, the work chunk status is changed from `IN_PROGRESS` to `POLL_WAITING`, and a `nextPollTime` value will be set.
+1. If the step fails, the work chunk status is changed from `IN_PROGRESS` to either `ERRORED` or `FAILED`, depending on the severity of the error.
+
+### First Step
+
+The first step in a job definition is executed with just the job parameters.
+
+### Middle steps
+
+Middle Steps in the job definition are executed using the initial Job Parameters and the Work Chunk data produced from the previous step.
+
+### Final Step
The final step operates the same way as the middle steps, except it does not produce any new work chunks.
### Gated Execution
-If a Job Definition is set to having Gated Execution, then all work chunks for one step must be COMPLETED before any work chunks for the next step may begin.
+If a Job Definition is set to use Gated Execution, then all work chunks for a step must be `COMPLETED` before any work chunks for the next step may begin.
### Job Instance Completion
-A Batch Job Maintenance Service runs every minute to monitor the status of all Job Instances and the Job Instance is transitioned to either COMPLETED, ERRORED or FAILED according to the status of all outstanding work chunks for that job instance. If the job instance is still IN_PROGRESS this maintenance service also estimates the time remaining to complete the job.
+A Batch Job Maintenance Service runs every minute to monitor the status of all Job Instances and the Job Instance is transitioned to either `COMPLETED`, `ERRORED` or `FAILED` according to the status of all outstanding work chunks for that job instance. If the job instance is still `IN_PROGRESS` this maintenance service also estimates the time remaining to complete the job.
diff --git a/hapi-fhir-jacoco/pom.xml b/hapi-fhir-jacoco/pom.xml
index 2079a6bb970..3ba709907e0 100644
--- a/hapi-fhir-jacoco/pom.xml
+++ b/hapi-fhir-jacoco/pom.xml
@@ -11,7 +11,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jaxrsserver-base/pom.xml b/hapi-fhir-jaxrsserver-base/pom.xml
index 7077325c2c6..a36f270f1ca 100644
--- a/hapi-fhir-jaxrsserver-base/pom.xml
+++ b/hapi-fhir-jaxrsserver-base/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpa/pom.xml b/hapi-fhir-jpa/pom.xml
index b7dd29597aa..fd28ff49ed0 100644
--- a/hapi-fhir-jpa/pom.xml
+++ b/hapi-fhir-jpa/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/IHapiScheduler.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/IHapiScheduler.java
index f2084bfa7c8..f9cb5e6c020 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/IHapiScheduler.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/IHapiScheduler.java
@@ -37,6 +37,17 @@ public interface IHapiScheduler {
void logStatusForUnitTest();
+ /**
+ * Pauses this scheduler (and thus all scheduled jobs).
+ * To restart call {@link #unpause()}
+ */
+ void pause();
+
+ /**
+ * Restarts this scheduler after {@link #pause()}
+ */
+ void unpause();
+
void scheduleJob(long theIntervalMillis, ScheduledJobDefinition theJobDefinition);
Set getJobKeysForUnitTest() throws SchedulerException;
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/ISchedulerService.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/ISchedulerService.java
index c058198e03c..5ff1057937c 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/ISchedulerService.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/model/sched/ISchedulerService.java
@@ -32,6 +32,20 @@ public interface ISchedulerService {
void logStatusForUnitTest();
+ /**
+ * Pauses the scheduler so no new jobs will run.
+ * Useful in tests when cleanup needs to happen but scheduled jobs may
+ * be running
+ */
+ @VisibleForTesting
+ void pause();
+
+ /**
+ * Restarts the scheduler after a previous call to {@link #pause()}.
+ */
+ @VisibleForTesting
+ void unpause();
+
/**
* This task will execute locally (and should execute on all nodes of the cluster if there is a cluster)
* @param theIntervalMillis How many milliseconds between passes should this job run
@@ -52,6 +66,9 @@ public interface ISchedulerService {
@VisibleForTesting
Set getClusteredJobKeysForUnitTest() throws SchedulerException;
+ @VisibleForTesting
+ boolean isSchedulingDisabled();
+
boolean isStopping();
/**
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseHapiScheduler.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseHapiScheduler.java
index 916bebe93fa..f8def318609 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseHapiScheduler.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseHapiScheduler.java
@@ -29,6 +29,7 @@ import com.google.common.collect.Sets;
import jakarta.annotation.Nonnull;
import org.apache.commons.lang3.Validate;
import org.quartz.JobDataMap;
+import org.quartz.JobExecutionContext;
import org.quartz.JobKey;
import org.quartz.ScheduleBuilder;
import org.quartz.Scheduler;
@@ -44,11 +45,14 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.quartz.SchedulerFactoryBean;
+import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
public abstract class BaseHapiScheduler implements IHapiScheduler {
private static final Logger ourLog = LoggerFactory.getLogger(BaseHapiScheduler.class);
@@ -151,6 +155,42 @@ public abstract class BaseHapiScheduler implements IHapiScheduler {
}
}
+ public void pause() {
+ int delay = 100;
+ String errorMsg = null;
+ Throwable ex = null;
+ try {
+ int count = 0;
+ myScheduler.standby();
+ while (count < 3) {
+ if (!hasRunningJobs()) {
+ break;
+ }
+ Thread.sleep(delay);
+ count++;
+ }
+ if (count >= 3) {
+ errorMsg = "Scheduler on standby. But after " + (count + 1) * delay
+ + " ms there are still jobs running. Execution will continue, but may cause bugs.";
+ }
+ } catch (Exception x) {
+ ex = x;
+ errorMsg = "Failed to set to standby. Execution will continue, but may cause bugs.";
+ }
+
+ if (isNotBlank(errorMsg)) {
+ if (ex != null) {
+ ourLog.warn(errorMsg, ex);
+ } else {
+ ourLog.warn(errorMsg);
+ }
+ }
+ }
+
+ public void unpause() {
+ start();
+ }
+
@Override
public void clear() throws SchedulerException {
myScheduler.clear();
@@ -168,6 +208,16 @@ public abstract class BaseHapiScheduler implements IHapiScheduler {
}
}
+ private boolean hasRunningJobs() {
+ try {
+ List currentlyExecutingJobs = myScheduler.getCurrentlyExecutingJobs();
+ ourLog.info("Checking for running jobs. Found {} running.", currentlyExecutingJobs);
+ return !currentlyExecutingJobs.isEmpty();
+ } catch (SchedulerException ex) {
+ throw new RuntimeException(Msg.code(2521) + " Failed during check for scheduled jobs", ex);
+ }
+ }
+
@Override
public void scheduleJob(long theIntervalMillis, ScheduledJobDefinition theJobDefinition) {
Validate.isTrue(theIntervalMillis >= 100);
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseSchedulerServiceImpl.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseSchedulerServiceImpl.java
index 358aa408176..89036097ecf 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseSchedulerServiceImpl.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/BaseSchedulerServiceImpl.java
@@ -136,7 +136,7 @@ public abstract class BaseSchedulerServiceImpl implements ISchedulerService {
return retval;
}
- private boolean isSchedulingDisabled() {
+ public boolean isSchedulingDisabled() {
return !isLocalSchedulingEnabled() || isSchedulingDisabledForUnitTests();
}
@@ -198,6 +198,18 @@ public abstract class BaseSchedulerServiceImpl implements ISchedulerService {
myClusteredScheduler.logStatusForUnitTest();
}
+ @Override
+ public void pause() {
+ myLocalScheduler.pause();
+ myClusteredScheduler.pause();
+ }
+
+ @Override
+ public void unpause() {
+ myLocalScheduler.unpause();
+ myClusteredScheduler.unpause();
+ }
+
@Override
public void scheduleLocalJob(long theIntervalMillis, ScheduledJobDefinition theJobDefinition) {
scheduleJob("local", myLocalScheduler, theIntervalMillis, theJobDefinition);
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/HapiNullScheduler.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/HapiNullScheduler.java
index 349174eacfd..77c7217e850 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/HapiNullScheduler.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/sched/HapiNullScheduler.java
@@ -53,6 +53,16 @@ public class HapiNullScheduler implements IHapiScheduler {
@Override
public void logStatusForUnitTest() {}
+ @Override
+ public void pause() {
+ // nothing to do
+ }
+
+ @Override
+ public void unpause() {
+ // nothing to do
+ }
+
@Override
public void scheduleJob(long theIntervalMillis, ScheduledJobDefinition theJobDefinition) {
ourLog.debug("Skipping scheduling job {} since scheduling is disabled", theJobDefinition.getId());
diff --git a/hapi-fhir-jpaserver-base/pom.xml b/hapi-fhir-jpaserver-base/pom.xml
index 6cf0698fa2e..d4995828336 100644
--- a/hapi-fhir-jpaserver-base/pom.xml
+++ b/hapi-fhir-jpaserver-base/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JobInstanceUtil.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JobInstanceUtil.java
index c2db708638a..51698299f79 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JobInstanceUtil.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JobInstanceUtil.java
@@ -123,6 +123,8 @@ class JobInstanceUtil {
retVal.setErrorMessage(theEntity.getErrorMessage());
retVal.setErrorCount(theEntity.getErrorCount());
retVal.setRecordsProcessed(theEntity.getRecordsProcessed());
+ retVal.setNextPollTime(theEntity.getNextPollTime());
+ retVal.setPollAttempts(theEntity.getPollAttempts());
// note: may be null out if queried NoData
retVal.setData(theEntity.getSerializedData());
retVal.setWarningMessage(theEntity.getWarningMessage());
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaBatch2Config.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaBatch2Config.java
index 056a546b5c9..f73a88570f3 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaBatch2Config.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaBatch2Config.java
@@ -24,6 +24,7 @@ import ca.uhn.fhir.batch2.config.BaseBatch2Config;
import ca.uhn.fhir.interceptor.api.IInterceptorBroadcaster;
import ca.uhn.fhir.jpa.bulk.export.job.BulkExportJobConfig;
import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
+import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkMetadataViewRepository;
import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import jakarta.persistence.EntityManager;
@@ -39,12 +40,14 @@ public class JpaBatch2Config extends BaseBatch2Config {
public IJobPersistence batch2JobInstancePersister(
IBatch2JobInstanceRepository theJobInstanceRepository,
IBatch2WorkChunkRepository theWorkChunkRepository,
+ IBatch2WorkChunkMetadataViewRepository theWorkChunkMetadataViewRepo,
IHapiTransactionService theTransactionService,
EntityManager theEntityManager,
IInterceptorBroadcaster theInterceptorBroadcaster) {
return new JpaJobPersistenceImpl(
theJobInstanceRepository,
theWorkChunkRepository,
+ theWorkChunkMetadataViewRepo,
theTransactionService,
theEntityManager,
theInterceptorBroadcaster);
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImpl.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImpl.java
index 48140f8477c..5ea89d2adb4 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImpl.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImpl.java
@@ -28,16 +28,19 @@ import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkCompletionEvent;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import ca.uhn.fhir.batch2.model.WorkChunkErrorEvent;
+import ca.uhn.fhir.batch2.model.WorkChunkMetadata;
import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import ca.uhn.fhir.batch2.models.JobInstanceFetchRequest;
import ca.uhn.fhir.interceptor.api.HookParams;
import ca.uhn.fhir.interceptor.api.IInterceptorBroadcaster;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
+import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkMetadataViewRepository;
import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
+import ca.uhn.fhir.jpa.entity.Batch2WorkChunkMetadataView;
import ca.uhn.fhir.model.api.PagingIterator;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
@@ -64,7 +67,10 @@ import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.support.TransactionSynchronizationManager;
+import java.time.Instant;
+import java.util.Collections;
import java.util.Date;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
@@ -85,6 +91,7 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
private final IBatch2JobInstanceRepository myJobInstanceRepository;
private final IBatch2WorkChunkRepository myWorkChunkRepository;
+ private final IBatch2WorkChunkMetadataViewRepository myWorkChunkMetadataViewRepo;
private final EntityManager myEntityManager;
private final IHapiTransactionService myTransactionService;
private final IInterceptorBroadcaster myInterceptorBroadcaster;
@@ -95,13 +102,15 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
public JpaJobPersistenceImpl(
IBatch2JobInstanceRepository theJobInstanceRepository,
IBatch2WorkChunkRepository theWorkChunkRepository,
+ IBatch2WorkChunkMetadataViewRepository theWorkChunkMetadataViewRepo,
IHapiTransactionService theTransactionService,
EntityManager theEntityManager,
IInterceptorBroadcaster theInterceptorBroadcaster) {
- Validate.notNull(theJobInstanceRepository);
- Validate.notNull(theWorkChunkRepository);
+ Validate.notNull(theJobInstanceRepository, "theJobInstanceRepository");
+ Validate.notNull(theWorkChunkRepository, "theWorkChunkRepository");
myJobInstanceRepository = theJobInstanceRepository;
myWorkChunkRepository = theWorkChunkRepository;
+ myWorkChunkMetadataViewRepo = theWorkChunkMetadataViewRepo;
myTransactionService = theTransactionService;
myEntityManager = theEntityManager;
myInterceptorBroadcaster = theInterceptorBroadcaster;
@@ -120,23 +129,46 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
entity.setSerializedData(theBatchWorkChunk.serializedData);
entity.setCreateTime(new Date());
entity.setStartTime(new Date());
- entity.setStatus(WorkChunkStatusEnum.QUEUED);
+ entity.setStatus(getOnCreateStatus(theBatchWorkChunk));
+
ourLog.debug("Create work chunk {}/{}/{}", entity.getInstanceId(), entity.getId(), entity.getTargetStepId());
ourLog.trace(
"Create work chunk data {}/{}: {}", entity.getInstanceId(), entity.getId(), entity.getSerializedData());
myTransactionService.withSystemRequestOnDefaultPartition().execute(() -> myWorkChunkRepository.save(entity));
+
return entity.getId();
}
+ /**
+ * Gets the initial onCreate state for the given workchunk.
+ * Gated job chunks start in GATE_WAITING; they will be transitioned to READY during maintenance pass when all
+ * chunks in the previous step are COMPLETED.
+ * Non gated job chunks start in READY
+ */
+ private static WorkChunkStatusEnum getOnCreateStatus(WorkChunkCreateEvent theBatchWorkChunk) {
+ if (theBatchWorkChunk.isGatedExecution) {
+ return WorkChunkStatusEnum.GATE_WAITING;
+ } else {
+ return WorkChunkStatusEnum.READY;
+ }
+ }
+
@Override
@Transactional(propagation = Propagation.REQUIRED)
public Optional onWorkChunkDequeue(String theChunkId) {
+ // acquire a pessimistic write lock on the chunk row so a concurrent maintenance pass cannot modify it mid-dequeue.
+ Batch2WorkChunkEntity chunkLock =
+ myEntityManager.find(Batch2WorkChunkEntity.class, theChunkId, LockModeType.PESSIMISTIC_WRITE);
+ // detach the locked entity from the persistence context so subsequent reads fetch fresh, non-stale state.
+ myEntityManager.detach(chunkLock);
+
// NOTE: Ideally, IN_PROGRESS wouldn't be allowed here. On chunk failure, we probably shouldn't be allowed.
// But how does re-run happen if k8s kills a processor mid run?
List priorStates =
List.of(WorkChunkStatusEnum.QUEUED, WorkChunkStatusEnum.ERRORED, WorkChunkStatusEnum.IN_PROGRESS);
int rowsModified = myWorkChunkRepository.updateChunkStatusForStart(
theChunkId, new Date(), WorkChunkStatusEnum.IN_PROGRESS, priorStates);
+
if (rowsModified == 0) {
ourLog.info("Attempting to start chunk {} but it was already started.", theChunkId);
return Optional.empty();
@@ -288,6 +320,22 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
.collect(Collectors.toList()));
}
+ @Override
+ public void enqueueWorkChunkForProcessing(String theChunkId, Consumer theCallback) {
+ int updated = myWorkChunkRepository.updateChunkStatus(
+ theChunkId, WorkChunkStatusEnum.READY, WorkChunkStatusEnum.QUEUED);
+ theCallback.accept(updated);
+ }
+
+ @Override
+ public int updatePollWaitingChunksForJobIfReady(String theInstanceId) {
+ return myWorkChunkRepository.updateWorkChunksForPollWaiting(
+ theInstanceId,
+ Date.from(Instant.now()),
+ Set.of(WorkChunkStatusEnum.POLL_WAITING),
+ WorkChunkStatusEnum.READY);
+ }
+
@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public List fetchRecentInstances(int thePageSize, int thePageIndex) {
@@ -333,6 +381,16 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
});
}
+ @Override
+ public void onWorkChunkPollDelay(String theChunkId, Date theDeadline) {
+ int updated = myWorkChunkRepository.updateWorkChunkNextPollTime(
+ theChunkId, WorkChunkStatusEnum.POLL_WAITING, Set.of(WorkChunkStatusEnum.IN_PROGRESS), theDeadline);
+
+ if (updated != 1) {
+ ourLog.warn("Expected to update 1 work chunk's poll delay; but found {}", updated);
+ }
+ }
+
@Override
public void onWorkChunkFailed(String theChunkId, String theErrorMessage) {
ourLog.info("Marking chunk {} as failed with message: {}", theChunkId, theErrorMessage);
@@ -383,24 +441,23 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
}
@Override
- @Transactional(propagation = Propagation.REQUIRES_NEW)
- public boolean canAdvanceInstanceToNextStep(String theInstanceId, String theCurrentStepId) {
+ public Set getDistinctWorkChunkStatesForJobAndStep(
+ String theInstanceId, String theCurrentStepId) {
+ if (getRunningJob(theInstanceId) == null) {
+ return Collections.unmodifiableSet(new HashSet<>());
+ }
+ return myWorkChunkRepository.getDistinctStatusesForStep(theInstanceId, theCurrentStepId);
+ }
+
+ private Batch2JobInstanceEntity getRunningJob(String theInstanceId) {
Optional instance = myJobInstanceRepository.findById(theInstanceId);
if (instance.isEmpty()) {
- return false;
+ return null;
}
if (instance.get().getStatus().isEnded()) {
- return false;
+ return null;
}
- Set statusesForStep =
- myWorkChunkRepository.getDistinctStatusesForStep(theInstanceId, theCurrentStepId);
-
- ourLog.debug(
- "Checking whether gated job can advanced to next step. [instanceId={}, currentStepId={}, statusesForStep={}]",
- theInstanceId,
- theCurrentStepId,
- statusesForStep);
- return statusesForStep.isEmpty() || statusesForStep.equals(Set.of(WorkChunkStatusEnum.COMPLETED));
+ return instance.get();
}
private void fetchChunks(
@@ -428,18 +485,16 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
}
@Override
- public List fetchAllChunkIdsForStepWithStatus(
- String theInstanceId, String theStepId, WorkChunkStatusEnum theStatusEnum) {
- return myTransactionService
- .withSystemRequest()
- .withPropagation(Propagation.REQUIRES_NEW)
- .execute(() -> myWorkChunkRepository.fetchAllChunkIdsForStepWithStatus(
- theInstanceId, theStepId, theStatusEnum));
+ public void updateInstanceUpdateTime(String theInstanceId) {
+ myJobInstanceRepository.updateInstanceUpdateTime(theInstanceId, new Date());
}
@Override
- public void updateInstanceUpdateTime(String theInstanceId) {
- myJobInstanceRepository.updateInstanceUpdateTime(theInstanceId, new Date());
+ public WorkChunk createWorkChunk(WorkChunk theWorkChunk) {
+ if (theWorkChunk.getId() == null) {
+ theWorkChunk.setId(UUID.randomUUID().toString());
+ }
+ return toChunk(myWorkChunkRepository.save(Batch2WorkChunkEntity.fromWorkChunk(theWorkChunk)));
}
/**
@@ -458,6 +513,15 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
.map(this::toChunk);
}
+ @Override
+ public Page fetchAllWorkChunkMetadataForJobInStates(
+ Pageable thePageable, String theInstanceId, Set theStates) {
+ Page page =
+ myWorkChunkMetadataViewRepo.fetchWorkChunkMetadataForJobInStates(thePageable, theInstanceId, theStates);
+
+ return page.map(Batch2WorkChunkMetadataView::toChunkMetadata);
+ }
+
@Override
public boolean updateInstance(String theInstanceId, JobInstanceUpdateCallback theModifier) {
Batch2JobInstanceEntity instanceEntity =
@@ -542,4 +606,45 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
myInterceptorBroadcaster.callHooks(Pointcut.STORAGE_PRESTORAGE_BATCH_JOB_CREATE, params);
}
}
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRES_NEW)
+ public boolean advanceJobStepAndUpdateChunkStatus(
+ String theJobInstanceId, String theNextStepId, boolean theIsReductionStep) {
+ boolean changed = updateInstance(theJobInstanceId, instance -> {
+ if (instance.getCurrentGatedStepId().equals(theNextStepId)) {
+ // someone else beat us here. No changes
+ return false;
+ }
+ ourLog.debug("Moving gated instance {} to the next step {}.", theJobInstanceId, theNextStepId);
+ instance.setCurrentGatedStepId(theNextStepId);
+ return true;
+ });
+
+ if (changed) {
+ ourLog.debug(
+ "Updating chunk status from GATE_WAITING to READY for gated instance {} in step {}.",
+ theJobInstanceId,
+ theNextStepId);
+ WorkChunkStatusEnum nextStep =
+ theIsReductionStep ? WorkChunkStatusEnum.REDUCTION_READY : WorkChunkStatusEnum.READY;
+ // When we reach here, the instance's current gated step id already equals theNextStepId.
+ // Up to version 7.1, gated jobs' work chunks were created in status QUEUED but were never actually
+ // submitted to the workers.
+ // To stay compatible with such pre-existing chunks, QUEUED chunks are transitioned to READY as well.
+ // TODO: 'QUEUED' from the IN clause will be removed after 7.6.0.
+ int numChanged = myWorkChunkRepository.updateAllChunksForStepWithStatus(
+ theJobInstanceId,
+ theNextStepId,
+ List.of(WorkChunkStatusEnum.GATE_WAITING, WorkChunkStatusEnum.QUEUED),
+ nextStep);
+ ourLog.debug(
+ "Updated {} chunks of gated instance {} for step {} from fake QUEUED to READY.",
+ numChanged,
+ theJobInstanceId,
+ theNextStepId);
+ }
+
+ return changed;
+ }
}
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkMetadataViewRepository.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkMetadataViewRepository.java
new file mode 100644
index 00000000000..2c759143ef6
--- /dev/null
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkMetadataViewRepository.java
@@ -0,0 +1,21 @@
+package ca.uhn.fhir.jpa.dao.data;
+
+import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
+import ca.uhn.fhir.jpa.entity.Batch2WorkChunkMetadataView;
+import org.springframework.data.domain.Page;
+import org.springframework.data.domain.Pageable;
+import org.springframework.data.jpa.repository.JpaRepository;
+import org.springframework.data.jpa.repository.Query;
+import org.springframework.data.repository.query.Param;
+
+import java.util.Collection;
+
+public interface IBatch2WorkChunkMetadataViewRepository extends JpaRepository {
+
+ @Query("SELECT v FROM Batch2WorkChunkMetadataView v WHERE v.myInstanceId = :instanceId AND v.myStatus IN :states "
+ + " ORDER BY v.myInstanceId, v.myTargetStepId, v.myStatus, v.mySequence, v.myId ASC")
+ Page fetchWorkChunkMetadataForJobInStates(
+ Pageable thePageRequest,
+ @Param("instanceId") String theInstanceId,
+ @Param("states") Collection theStates);
+}
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkRepository.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkRepository.java
index 053fc35f89e..0dc9243290e 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkRepository.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/data/IBatch2WorkChunkRepository.java
@@ -49,7 +49,8 @@ public interface IBatch2WorkChunkRepository
@Query("SELECT new Batch2WorkChunkEntity("
+ "e.myId, e.mySequence, e.myJobDefinitionId, e.myJobDefinitionVersion, e.myInstanceId, e.myTargetStepId, e.myStatus,"
+ "e.myCreateTime, e.myStartTime, e.myUpdateTime, e.myEndTime,"
- + "e.myErrorMessage, e.myErrorCount, e.myRecordsProcessed, e.myWarningMessage"
+ + "e.myErrorMessage, e.myErrorCount, e.myRecordsProcessed, e.myWarningMessage,"
+ + "e.myNextPollTime, e.myPollAttempts"
+ ") FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId ORDER BY e.mySequence ASC, e.myId ASC")
List fetchChunksNoData(Pageable thePageRequest, @Param("instanceId") String theInstanceId);
@@ -75,6 +76,24 @@ public interface IBatch2WorkChunkRepository
@Param("status") WorkChunkStatusEnum theInProgress,
@Param("warningMessage") String theWarningMessage);
+ @Modifying
+ @Query(
+ "UPDATE Batch2WorkChunkEntity e SET e.myStatus = :status, e.myNextPollTime = :nextPollTime, e.myPollAttempts = e.myPollAttempts + 1 WHERE e.myId = :id AND e.myStatus IN(:states)")
+ int updateWorkChunkNextPollTime(
+ @Param("id") String theChunkId,
+ @Param("status") WorkChunkStatusEnum theStatus,
+ @Param("states") Set theInitialStates,
+ @Param("nextPollTime") Date theNextPollTime);
+
+ @Modifying
+ @Query(
+ "UPDATE Batch2WorkChunkEntity e SET e.myStatus = :status, e.myNextPollTime = null WHERE e.myInstanceId = :instanceId AND e.myStatus IN(:states) AND e.myNextPollTime <= :pollTime")
+ int updateWorkChunksForPollWaiting(
+ @Param("instanceId") String theInstanceId,
+ @Param("pollTime") Date theTime,
+ @Param("states") Set theInitialStates,
+ @Param("status") WorkChunkStatusEnum theNewStatus);
+
@Modifying
@Query(
"UPDATE Batch2WorkChunkEntity e SET e.myStatus = :status, e.myEndTime = :et, e.mySerializedData = null, e.mySerializedDataVc = null, e.myErrorMessage = :em WHERE e.myId IN(:ids)")
@@ -102,6 +121,22 @@ public interface IBatch2WorkChunkRepository
@Param("status") WorkChunkStatusEnum theInProgress,
@Param("startStatuses") Collection theStartStatuses);
+ @Modifying
+ @Query("UPDATE Batch2WorkChunkEntity e SET e.myStatus = :newStatus WHERE e.myId = :id AND e.myStatus = :oldStatus")
+ int updateChunkStatus(
+ @Param("id") String theChunkId,
+ @Param("oldStatus") WorkChunkStatusEnum theOldStatus,
+ @Param("newStatus") WorkChunkStatusEnum theNewStatus);
+
+ @Modifying
+ @Query(
+ "UPDATE Batch2WorkChunkEntity e SET e.myStatus = :newStatus WHERE e.myInstanceId = :instanceId AND e.myTargetStepId = :stepId AND e.myStatus IN ( :oldStatuses )")
+ int updateAllChunksForStepWithStatus(
+ @Param("instanceId") String theInstanceId,
+ @Param("stepId") String theStepId,
+ @Param("oldStatuses") List theOldStatuses,
+ @Param("newStatus") WorkChunkStatusEnum theNewStatus);
+
@Modifying
@Query("DELETE FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId")
int deleteAllForInstance(@Param("instanceId") String theInstanceId);
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkEntity.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkEntity.java
index 42281ac475f..ff34c74b6fd 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkEntity.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkEntity.java
@@ -19,6 +19,7 @@
*/
package ca.uhn.fhir.jpa.entity;
+import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import jakarta.persistence.Basic;
import jakarta.persistence.Column;
@@ -50,7 +51,10 @@ import static org.apache.commons.lang3.StringUtils.left;
@Entity
@Table(
name = "BT2_WORK_CHUNK",
- indexes = {@Index(name = "IDX_BT2WC_II_SEQ", columnList = "INSTANCE_ID,SEQ")})
+ indexes = {
+ @Index(name = "IDX_BT2WC_II_SEQ", columnList = "INSTANCE_ID,SEQ"),
+ @Index(name = "IDX_BT2WC_II_SI_S_SEQ_ID", columnList = "INSTANCE_ID,TGT_STEP_ID,STAT,SEQ,ID")
+ })
public class Batch2WorkChunkEntity implements Serializable {
public static final int ERROR_MSG_MAX_LENGTH = 500;
@@ -125,6 +129,19 @@ public class Batch2WorkChunkEntity implements Serializable {
@Column(name = "WARNING_MSG", length = WARNING_MSG_MAX_LENGTH, nullable = true)
private String myWarningMessage;
+ /**
+ * The earliest time at which the work chunk may re-attempt its work step.
+ */
+ @Column(name = "NEXT_POLL_TIME", nullable = true)
+ @Temporal(TemporalType.TIMESTAMP)
+ private Date myNextPollTime;
+
+ /**
+ * The number of times the work chunk has had its state set back to POLL_WAITING.
+ */
+ @Column(name = "POLL_ATTEMPTS", nullable = true)
+ private int myPollAttempts;
+
/**
* Default constructor for Hibernate.
*/
@@ -148,7 +165,9 @@ public class Batch2WorkChunkEntity implements Serializable {
String theErrorMessage,
int theErrorCount,
Integer theRecordsProcessed,
- String theWarningMessage) {
+ String theWarningMessage,
+ Date theNextPollTime,
+ Integer thePollAttempts) {
myId = theId;
mySequence = theSequence;
myJobDefinitionId = theJobDefinitionId;
@@ -164,6 +183,32 @@ public class Batch2WorkChunkEntity implements Serializable {
myErrorCount = theErrorCount;
myRecordsProcessed = theRecordsProcessed;
myWarningMessage = theWarningMessage;
+ myNextPollTime = theNextPollTime;
+ myPollAttempts = thePollAttempts;
+ }
+
+ public static Batch2WorkChunkEntity fromWorkChunk(WorkChunk theWorkChunk) {
+ Batch2WorkChunkEntity entity = new Batch2WorkChunkEntity(
+ theWorkChunk.getId(),
+ theWorkChunk.getSequence(),
+ theWorkChunk.getJobDefinitionId(),
+ theWorkChunk.getJobDefinitionVersion(),
+ theWorkChunk.getInstanceId(),
+ theWorkChunk.getTargetStepId(),
+ theWorkChunk.getStatus(),
+ theWorkChunk.getCreateTime(),
+ theWorkChunk.getStartTime(),
+ theWorkChunk.getUpdateTime(),
+ theWorkChunk.getEndTime(),
+ theWorkChunk.getErrorMessage(),
+ theWorkChunk.getErrorCount(),
+ theWorkChunk.getRecordsProcessed(),
+ theWorkChunk.getWarningMessage(),
+ theWorkChunk.getNextPollTime(),
+ theWorkChunk.getPollAttempts());
+ entity.setSerializedData(theWorkChunk.getData());
+
+ return entity;
}
public int getErrorCount() {
@@ -299,6 +344,22 @@ public class Batch2WorkChunkEntity implements Serializable {
myInstanceId = theInstanceId;
}
+ public Date getNextPollTime() {
+ return myNextPollTime;
+ }
+
+ public void setNextPollTime(Date theNextPollTime) {
+ myNextPollTime = theNextPollTime;
+ }
+
+ public int getPollAttempts() {
+ return myPollAttempts;
+ }
+
+ public void setPollAttempts(int thePollAttempts) {
+ myPollAttempts = thePollAttempts;
+ }
+
@Override
public String toString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
@@ -318,6 +379,8 @@ public class Batch2WorkChunkEntity implements Serializable {
.append("status", myStatus)
.append("errorMessage", myErrorMessage)
.append("warningMessage", myWarningMessage)
+ .append("nextPollTime", myNextPollTime)
+ .append("pollAttempts", myPollAttempts)
.toString();
}
}
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkMetadataView.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkMetadataView.java
new file mode 100644
index 00000000000..4034a13f7cd
--- /dev/null
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/entity/Batch2WorkChunkMetadataView.java
@@ -0,0 +1,123 @@
+package ca.uhn.fhir.jpa.entity;
+
+import ca.uhn.fhir.batch2.model.WorkChunkMetadata;
+import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.EnumType;
+import jakarta.persistence.Enumerated;
+import jakarta.persistence.Id;
+import org.hibernate.annotations.Immutable;
+import org.hibernate.annotations.Subselect;
+
+import java.io.Serializable;
+
+import static ca.uhn.fhir.batch2.model.JobDefinition.ID_MAX_LENGTH;
+
+/**
+ * A read-only projection of a Work Chunk containing only the metadata columns
+ * required by the no-data (metadata-only) query path.
+ */
+@Entity
+@Immutable
+@Subselect("SELECT e.id as id, "
+ + " e.seq as seq,"
+ + " e.stat as state, "
+ + " e.instance_id as instance_id, "
+ + " e.definition_id as job_definition_id, "
+ + " e.definition_ver as job_definition_version, "
+ + " e.tgt_step_id as target_step_id "
+ + "FROM BT2_WORK_CHUNK e")
+public class Batch2WorkChunkMetadataView implements Serializable {
+
+ @Id
+ @Column(name = "ID", length = ID_MAX_LENGTH)
+ private String myId;
+
+ @Column(name = "SEQ", nullable = false)
+ private int mySequence;
+
+ @Column(name = "STATE", length = ID_MAX_LENGTH, nullable = false)
+ @Enumerated(EnumType.STRING)
+ private WorkChunkStatusEnum myStatus;
+
+ @Column(name = "INSTANCE_ID", length = ID_MAX_LENGTH, nullable = false)
+ private String myInstanceId;
+
+ @Column(name = "JOB_DEFINITION_ID", length = ID_MAX_LENGTH, nullable = false)
+ private String myJobDefinitionId;
+
+ @Column(name = "JOB_DEFINITION_VERSION", nullable = false)
+ private int myJobDefinitionVersion;
+
+ @Column(name = "TARGET_STEP_ID", length = ID_MAX_LENGTH, nullable = false)
+ private String myTargetStepId;
+
+ public String getId() {
+ return myId;
+ }
+
+ public void setId(String theId) {
+ myId = theId;
+ }
+
+ public int getSequence() {
+ return mySequence;
+ }
+
+ public void setSequence(int theSequence) {
+ mySequence = theSequence;
+ }
+
+ public WorkChunkStatusEnum getStatus() {
+ return myStatus;
+ }
+
+ public void setStatus(WorkChunkStatusEnum theStatus) {
+ myStatus = theStatus;
+ }
+
+ public String getInstanceId() {
+ return myInstanceId;
+ }
+
+ public void setInstanceId(String theInstanceId) {
+ myInstanceId = theInstanceId;
+ }
+
+ public String getJobDefinitionId() {
+ return myJobDefinitionId;
+ }
+
+ public void setJobDefinitionId(String theJobDefinitionId) {
+ myJobDefinitionId = theJobDefinitionId;
+ }
+
+ public int getJobDefinitionVersion() {
+ return myJobDefinitionVersion;
+ }
+
+ public void setJobDefinitionVersion(int theJobDefinitionVersion) {
+ myJobDefinitionVersion = theJobDefinitionVersion;
+ }
+
+ public String getTargetStepId() {
+ return myTargetStepId;
+ }
+
+ public void setTargetStepId(String theTargetStepId) {
+ myTargetStepId = theTargetStepId;
+ }
+
+ public WorkChunkMetadata toChunkMetadata() {
+ WorkChunkMetadata metadata = new WorkChunkMetadata();
+ metadata.setId(getId());
+ metadata.setInstanceId(getInstanceId());
+ metadata.setSequence(getSequence());
+ metadata.setStatus(getStatus());
+ metadata.setJobDefinitionId(getJobDefinitionId());
+ metadata.setJobDefinitionVersion(getJobDefinitionVersion());
+ metadata.setTargetStepId(getTargetStepId());
+ return metadata;
+ }
+}
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/migrate/tasks/HapiFhirJpaMigrationTasks.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/migrate/tasks/HapiFhirJpaMigrationTasks.java
index 86e27081679..3c421278e10 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/migrate/tasks/HapiFhirJpaMigrationTasks.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/migrate/tasks/HapiFhirJpaMigrationTasks.java
@@ -293,6 +293,23 @@ public class HapiFhirJpaMigrationTasks extends BaseMigrationTasks {
// This fix will work for MSSQL or Oracle.
version.addTask(new ForceIdMigrationFixTask(version.getRelease(), "20231222.1"));
+
+ // add index to Batch2WorkChunkEntity
+ Builder.BuilderWithTableName workChunkTable = version.onTable("BT2_WORK_CHUNK");
+
+ workChunkTable
+ .addIndex("20240321.1", "IDX_BT2WC_II_SI_S_SEQ_ID")
+ .unique(false)
+ .withColumns("INSTANCE_ID", "TGT_STEP_ID", "STAT", "SEQ", "ID");
+
+ // add columns to Batch2WorkChunkEntity
+ Builder.BuilderWithTableName batch2WorkChunkTable = version.onTable("BT2_WORK_CHUNK");
+
+ batch2WorkChunkTable
+ .addColumn("20240322.1", "NEXT_POLL_TIME")
+ .nullable()
+ .type(ColumnTypeEnum.DATE_TIMESTAMP);
+ batch2WorkChunkTable.addColumn("20240322.2", "POLL_ATTEMPTS").nullable().type(ColumnTypeEnum.INT);
}
private void init680_Part2() {
diff --git a/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java b/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
index ba013f714bc..14b21559d52 100644
--- a/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
+++ b/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
@@ -4,6 +4,7 @@ import ca.uhn.fhir.batch2.api.JobOperationResultJson;
import ca.uhn.fhir.batch2.model.FetchJobInstancesRequest;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
@@ -31,6 +32,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
diff --git a/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/bulk/export/svc/BulkDataExportJobSchedulingHelperImplTest.java b/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/bulk/export/svc/BulkDataExportJobSchedulingHelperImplTest.java
index f7c90efb7c8..56e1080adab 100644
--- a/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/bulk/export/svc/BulkDataExportJobSchedulingHelperImplTest.java
+++ b/hapi-fhir-jpaserver-base/src/test/java/ca/uhn/fhir/jpa/bulk/export/svc/BulkDataExportJobSchedulingHelperImplTest.java
@@ -30,6 +30,8 @@ import org.springframework.transaction.support.TransactionCallback;
import org.springframework.transaction.support.TransactionTemplate;
import jakarta.annotation.Nonnull;
+
+import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
@@ -43,6 +45,7 @@ import java.util.stream.IntStream;
import static org.exparity.hamcrest.date.DateMatchers.within;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
@@ -97,7 +100,17 @@ public class BulkDataExportJobSchedulingHelperImplTest {
verify(myJpaJobPersistence, never()).deleteInstanceAndChunks(anyString());
final Date cutoffDate = myCutoffCaptor.getValue();
- assertEquals(DateUtils.truncate(computeDateFromConfig(expectedRetentionHours), Calendar.SECOND), DateUtils.truncate(cutoffDate, Calendar.SECOND));
+ Date expectedCutoff = computeDateFromConfig(expectedRetentionHours);
+ verifyDatesWithinSeconds(expectedCutoff, cutoffDate, 2);
+ }
+
+ private void verifyDatesWithinSeconds(Date theExpected, Date theActual, int theSeconds) {
+ Instant expectedInstant = theExpected.toInstant();
+ Instant actualInstant = theActual.toInstant();
+
+ String msg = String.format("Expected time not within %d s", theSeconds);
+ assertTrue(expectedInstant.plus(theSeconds, ChronoUnit.SECONDS).isAfter(actualInstant), msg);
+ assertTrue(expectedInstant.minus(theSeconds, ChronoUnit.SECONDS).isBefore(actualInstant), msg);
}
@Test
diff --git a/hapi-fhir-jpaserver-elastic-test-utilities/pom.xml b/hapi-fhir-jpaserver-elastic-test-utilities/pom.xml
index bb2281e78d6..157c68d5c0c 100644
--- a/hapi-fhir-jpaserver-elastic-test-utilities/pom.xml
+++ b/hapi-fhir-jpaserver-elastic-test-utilities/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-hfql/pom.xml b/hapi-fhir-jpaserver-hfql/pom.xml
index 1bdf1f4d5eb..706d5ca1335 100644
--- a/hapi-fhir-jpaserver-hfql/pom.xml
+++ b/hapi-fhir-jpaserver-hfql/pom.xml
@@ -3,7 +3,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-ips/pom.xml b/hapi-fhir-jpaserver-ips/pom.xml
index 164b0a28831..ca914b4b1bd 100644
--- a/hapi-fhir-jpaserver-ips/pom.xml
+++ b/hapi-fhir-jpaserver-ips/pom.xml
@@ -3,7 +3,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-mdm/pom.xml b/hapi-fhir-jpaserver-mdm/pom.xml
index 1a7598268ab..3030cb455b6 100644
--- a/hapi-fhir-jpaserver-mdm/pom.xml
+++ b/hapi-fhir-jpaserver-mdm/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-model/pom.xml b/hapi-fhir-jpaserver-model/pom.xml
index 2cee62cefaf..d2028f332ae 100644
--- a/hapi-fhir-jpaserver-model/pom.xml
+++ b/hapi-fhir-jpaserver-model/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/BaseTag.java b/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/BaseTag.java
index 54d2405fba3..0bf655a77ae 100644
--- a/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/BaseTag.java
+++ b/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/BaseTag.java
@@ -31,6 +31,7 @@ public abstract class BaseTag extends BasePartitionable implements Serializable
private static final long serialVersionUID = 1L;
+ // many baseTags -> one tag definition
@ManyToOne(cascade = {})
@JoinColumn(name = "TAG_ID", nullable = false)
private TagDefinition myTag;
diff --git a/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/TagDefinition.java b/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/TagDefinition.java
index fe3868c8313..ad4e6309508 100644
--- a/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/TagDefinition.java
+++ b/hapi-fhir-jpaserver-model/src/main/java/ca/uhn/fhir/jpa/model/entity/TagDefinition.java
@@ -67,12 +67,14 @@ public class TagDefinition implements Serializable {
@Column(name = "TAG_ID")
private Long myId;
+ // one tag definition -> many resource tags
@OneToMany(
cascade = {},
fetch = FetchType.LAZY,
mappedBy = "myTag")
private Collection myResources;
+ // one tag definition -> many history
@OneToMany(
cascade = {},
fetch = FetchType.LAZY,
diff --git a/hapi-fhir-jpaserver-searchparam/pom.xml b/hapi-fhir-jpaserver-searchparam/pom.xml
index f0e7c3a6b94..87f9dd268b2 100755
--- a/hapi-fhir-jpaserver-searchparam/pom.xml
+++ b/hapi-fhir-jpaserver-searchparam/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-subscription/pom.xml b/hapi-fhir-jpaserver-subscription/pom.xml
index a79817e0242..e608341ec73 100644
--- a/hapi-fhir-jpaserver-subscription/pom.xml
+++ b/hapi-fhir-jpaserver-subscription/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-subscription/src/test/java/ca/uhn/fhir/jpa/subscription/module/subscriber/SubscriptionMatchingSubscriberTest.java b/hapi-fhir-jpaserver-subscription/src/test/java/ca/uhn/fhir/jpa/subscription/module/subscriber/SubscriptionMatchingSubscriberTest.java
index 1c0dcd424e5..3581da0a156 100644
--- a/hapi-fhir-jpaserver-subscription/src/test/java/ca/uhn/fhir/jpa/subscription/module/subscriber/SubscriptionMatchingSubscriberTest.java
+++ b/hapi-fhir-jpaserver-subscription/src/test/java/ca/uhn/fhir/jpa/subscription/module/subscriber/SubscriptionMatchingSubscriberTest.java
@@ -505,7 +505,7 @@ public class SubscriptionMatchingSubscriberTest extends BaseBlockingQueueSubscri
subscriber.matchActiveSubscriptionsAndDeliver(message);
- verify(myCanonicalSubscription, atLeastOnce()).getSendDeleteMessages();
+ verify(myCanonicalSubscription).getSendDeleteMessages();
}
@Test
diff --git a/hapi-fhir-jpaserver-test-dstu2/pom.xml b/hapi-fhir-jpaserver-test-dstu2/pom.xml
index a95f7d93414..ccc242c944d 100644
--- a/hapi-fhir-jpaserver-test-dstu2/pom.xml
+++ b/hapi-fhir-jpaserver-test-dstu2/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-dstu2/src/test/java/ca/uhn/fhir/jpa/dao/dstu2/FhirResourceDaoDstu2Test.java b/hapi-fhir-jpaserver-test-dstu2/src/test/java/ca/uhn/fhir/jpa/dao/dstu2/FhirResourceDaoDstu2Test.java
index 1d0902574bf..c27ecc6bc2d 100644
--- a/hapi-fhir-jpaserver-test-dstu2/src/test/java/ca/uhn/fhir/jpa/dao/dstu2/FhirResourceDaoDstu2Test.java
+++ b/hapi-fhir-jpaserver-test-dstu2/src/test/java/ca/uhn/fhir/jpa/dao/dstu2/FhirResourceDaoDstu2Test.java
@@ -2209,28 +2209,28 @@ public class FhirResourceDaoDstu2Test extends BaseJpaDstu2Test {
p.addName().addFamily(methodName);
IIdType id1 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
p = new Patient();
p.addIdentifier().setSystem("urn:system2").setValue(methodName);
p.addName().addFamily(methodName);
IIdType id2 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
p = new Patient();
p.addIdentifier().setSystem("urn:system3").setValue(methodName);
p.addName().addFamily(methodName);
IIdType id3 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
p = new Patient();
p.addIdentifier().setSystem("urn:system4").setValue(methodName);
p.addName().addFamily(methodName);
IIdType id4 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
SearchParameterMap pm;
List actual;
diff --git a/hapi-fhir-jpaserver-test-dstu3/pom.xml b/hapi-fhir-jpaserver-test-dstu3/pom.xml
index c94281fbf8f..bb969873766 100644
--- a/hapi-fhir-jpaserver-test-dstu3/pom.xml
+++ b/hapi-fhir-jpaserver-test-dstu3/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-r4/pom.xml b/hapi-fhir-jpaserver-test-r4/pom.xml
index a9b47640828..cc21ea5b56e 100644
--- a/hapi-fhir-jpaserver-test-r4/pom.xml
+++ b/hapi-fhir-jpaserver-test-r4/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2CoordinatorIT.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2CoordinatorIT.java
index 2e304e08373..f09f823f7a5 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2CoordinatorIT.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2CoordinatorIT.java
@@ -10,6 +10,7 @@ import ca.uhn.fhir.batch2.api.IJobStepWorker;
import ca.uhn.fhir.batch2.api.ILastJobStepWorker;
import ca.uhn.fhir.batch2.api.IReductionStepWorker;
import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
+import ca.uhn.fhir.batch2.api.RetryChunkLaterException;
import ca.uhn.fhir.batch2.api.RunOutcome;
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
import ca.uhn.fhir.batch2.api.VoidModel;
@@ -27,15 +28,20 @@ import ca.uhn.fhir.jpa.subscription.channel.api.IChannelFactory;
import ca.uhn.fhir.jpa.subscription.channel.impl.LinkedBlockingChannel;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import ca.uhn.fhir.jpa.test.Batch2JobHelper;
+import ca.uhn.fhir.jpa.test.config.Batch2FastSchedulerConfig;
+import ca.uhn.fhir.jpa.test.config.TestR4Config;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
+import ca.uhn.fhir.test.utilities.UnregisterScheduledProcessor;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.test.concurrency.PointcutLatch;
+import ca.uhn.test.util.LogbackCaptureTestExtension;
import com.fasterxml.jackson.annotation.JsonProperty;
import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.slf4j.Logger;
@@ -43,11 +49,21 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Sort;
+import org.springframework.messaging.MessageHandler;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.TestPropertySource;
+import org.testcontainers.shaded.org.awaitility.Awaitility;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -60,6 +76,13 @@ import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
+@ContextConfiguration(classes = {
+ Batch2FastSchedulerConfig.class
+})
+@TestPropertySource(properties = {
+ // These tests require scheduling to work
+ UnregisterScheduledProcessor.SCHEDULING_DISABLED_EQUALS_FALSE
+})
public class Batch2CoordinatorIT extends BaseJpaR4Test {
private static final Logger ourLog = LoggerFactory.getLogger(Batch2CoordinatorIT.class);
@@ -81,6 +104,9 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
@Autowired
IJobPersistence myJobPersistence;
+ @RegisterExtension
+ LogbackCaptureTestExtension myLogbackCaptureTestExtension = new LogbackCaptureTestExtension();
+
private final PointcutLatch myFirstStepLatch = new PointcutLatch("First Step");
private final PointcutLatch myLastStepLatch = new PointcutLatch("Last Step");
private IJobCompletionHandler myCompletionHandler;
@@ -91,6 +117,10 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
return RunOutcome.SUCCESS;
}
+ static {
+ TestR4Config.ourMaxThreads = 100;
+ }
+
@Override
@BeforeEach
public void before() throws Exception {
@@ -117,7 +147,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
// final step
ILastJobStepWorker last = (step, sink) -> RunOutcome.SUCCESS;
// job definition
- String jobId = new Exception().getStackTrace()[0].getMethodName();
+ String jobId = getMethodNameForJobId();
JobDefinition extends IModelJson> jd = JobDefinition.newBuilder()
.setJobDefinitionId(jobId)
.setJobDescription("test job")
@@ -183,7 +213,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
IJobStepWorker firstStep = (step, sink) -> callLatch(myFirstStepLatch, step);
IJobStepWorker lastStep = (step, sink) -> fail();
- String jobId = new Exception().getStackTrace()[0].getMethodName();
+ String jobId = getMethodNameForJobId();
JobDefinition extends IModelJson> definition = buildGatedJobDefinition(jobId, firstStep, lastStep);
myJobDefinitionRegistry.addJobDefinition(definition);
@@ -192,6 +222,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
myFirstStepLatch.setExpectedCount(1);
Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
+ myBatch2JobHelper.runMaintenancePass();
myFirstStepLatch.awaitExpected();
myBatch2JobHelper.awaitJobCompletion(startResponse.getInstanceId());
@@ -216,11 +247,10 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
myFirstStepLatch.setExpectedCount(1);
myLastStepLatch.setExpectedCount(1);
String batchJobId = myJobCoordinator.startInstance(new SystemRequestDetails(), request).getInstanceId();
+ myBatch2JobHelper.runMaintenancePass();
myFirstStepLatch.awaitExpected();
-
myBatch2JobHelper.assertFastTracking(batchJobId);
- // Since there was only one chunk, the job should proceed without requiring a maintenance pass
myBatch2JobHelper.awaitJobCompletion(batchJobId);
myLastStepLatch.awaitExpected();
@@ -234,10 +264,92 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
assertEquals(1.0, jobInstance.getProgress());
}
+ /**
+ * This test verifies that if we have a workchunks being processed by the queue,
+ * and the maintenance job kicks in, it won't necessarily advance the steps.
+ */
@Test
- public void reductionStepFailing_willFailJob() throws InterruptedException {
+ public void gatedJob_whenMaintenanceRunHappensDuringMsgProcessing_doesNotAdvance() throws InterruptedException {
// setup
- String jobId = new Exception().getStackTrace()[0].getMethodName();
+ // we disable the scheduler because multiple schedulers running simultaneously
+ // might cause database collisions we do not expect (not what we're testing)
+ myBatch2JobHelper.enableMaintenanceRunner(false);
+ String jobId = getMethodNameForJobId();
+ int chunksToMake = 5;
+ AtomicInteger secondGateCounter = new AtomicInteger();
+ AtomicBoolean reductionCheck = new AtomicBoolean(false);
+ // we will listen into the message queue so we can force actions on it
+ MessageHandler handler = message -> {
+ /*
+ * We will force a run of the maintenance job
+ * to simulate the situation in which a chunk is
+ * still being processed by the WorkChunkMessageHandler
+ * (and thus, not available yet).
+ */
+ myBatch2JobHelper.forceRunMaintenancePass();
+ };
+
+ buildAndDefine3StepReductionJob(jobId, new IReductionStepHandler() {
+
+ @Override
+ public void firstStep(StepExecutionDetails theStep, IJobDataSink theDataSink) {
+ for (int i = 0; i < chunksToMake; i++) {
+ theDataSink.accept(new FirstStepOutput());
+ }
+ }
+
+ @Override
+ public void secondStep(StepExecutionDetails theStep, IJobDataSink theDataSink) {
+ // no new chunks
+ SecondStepOutput output = new SecondStepOutput();
+ theDataSink.accept(output);
+ }
+
+ @Override
+ public void reductionStepConsume(ChunkExecutionDetails theChunkDetails, IJobDataSink theDataSink) {
+ // we expect to get one here
+ int val = secondGateCounter.getAndIncrement();
+ }
+
+ @Override
+ public void reductionStepRun(StepExecutionDetails theStepExecutionDetails, IJobDataSink theDataSink) {
+ reductionCheck.set(true);
+ theDataSink.accept(new ReductionStepOutput(new ArrayList<>()));
+ }
+ });
+
+ try {
+ myWorkChannel.subscribe(handler);
+
+ // test
+ JobInstanceStartRequest request = buildRequest(jobId);
+ myFirstStepLatch.setExpectedCount(1);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
+
+ String instanceId = startResponse.getInstanceId();
+
+ // wait
+ myBatch2JobHelper.awaitJobCompletion(instanceId);
+
+ // verify
+ Optional instanceOp = myJobPersistence.fetchInstance(instanceId);
+ assertTrue(instanceOp.isPresent());
+ JobInstance jobInstance = instanceOp.get();
+ assertTrue(reductionCheck.get());
+ assertEquals(chunksToMake, secondGateCounter.get());
+
+ assertEquals(StatusEnum.COMPLETED, jobInstance.getStatus());
+ assertEquals(1.0, jobInstance.getProgress());
+ } finally {
+ myWorkChannel.unsubscribe(handler);
+ myBatch2JobHelper.enableMaintenanceRunner(true);
+ }
+ }
+
+ @Test
+ public void reductionStepFailing_willFailJob() {
+ // setup
+ String jobId = getMethodNameForJobId();
int totalChunks = 3;
AtomicInteger chunkCounter = new AtomicInteger();
String error = "this is an error";
@@ -292,22 +404,17 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
@Test
public void testJobWithReductionStepFiresCompletionHandler() throws InterruptedException {
// setup
- String jobId = new Exception().getStackTrace()[0].getMethodName();
+ String jobId = getMethodNameForJobId();
String testInfo = "test";
int totalCalls = 2;
AtomicInteger secondStepInt = new AtomicInteger();
AtomicBoolean completionBool = new AtomicBoolean();
- AtomicBoolean jobStatusBool = new AtomicBoolean();
-
myCompletionHandler = (params) -> {
- // ensure our completion handler fires
+ // ensure our completion handler gets the right status
+ assertEquals(StatusEnum.COMPLETED, params.getInstance().getStatus());
completionBool.getAndSet(true);
-
- if (StatusEnum.COMPLETED.equals(params.getInstance().getStatus())){
- jobStatusBool.getAndSet(true);
- }
};
buildAndDefine3StepReductionJob(jobId, new IReductionStepHandler() {
@@ -351,10 +458,11 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
String instanceId = startResponse.getInstanceId();
+ myBatch2JobHelper.runMaintenancePass();
myFirstStepLatch.awaitExpected();
assertNotNull(instanceId);
- myBatch2JobHelper.awaitGatedStepId(FIRST_STEP_ID, instanceId);
+ myBatch2JobHelper.awaitGatedStepId(SECOND_STEP_ID, instanceId);
// wait for last step to finish
ourLog.info("Setting last step latch");
@@ -362,17 +470,16 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
// waiting
myBatch2JobHelper.awaitJobCompletion(instanceId);
- myLastStepLatch.awaitExpected();
ourLog.info("awaited the last step");
+ myLastStepLatch.awaitExpected();
// verify
Optional instanceOp = myJobPersistence.fetchInstance(instanceId);
assertTrue(instanceOp.isPresent());
JobInstance jobInstance = instanceOp.get();
- // ensure our completion handler fires with the up-to-date job instance
+ // ensure our completion handler fired
assertTrue(completionBool.get());
- assertTrue(jobStatusBool.get());
assertEquals(StatusEnum.COMPLETED, jobInstance.getStatus());
assertEquals(1.0, jobInstance.getProgress());
@@ -382,7 +489,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
@ValueSource(booleans = {true, false})
public void testJobDefinitionWithReductionStepIT(boolean theDelayReductionStepBool) throws InterruptedException {
// setup
- String jobId = new Exception().getStackTrace()[0].getMethodName() + "_" + theDelayReductionStepBool;
+ String jobId = getMethodNameForJobId() + "_" + theDelayReductionStepBool;
String testInfo = "test";
AtomicInteger secondStepInt = new AtomicInteger();
@@ -441,12 +548,12 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
JobInstanceStartRequest request = buildRequest(jobId);
myFirstStepLatch.setExpectedCount(1);
Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
-
String instanceId = startResponse.getInstanceId();
+ myBatch2JobHelper.runMaintenancePass();
myFirstStepLatch.awaitExpected();
assertNotNull(instanceId);
- myBatch2JobHelper.awaitGatedStepId(FIRST_STEP_ID, instanceId);
+ myBatch2JobHelper.awaitGatedStepId(SECOND_STEP_ID, instanceId);
// wait for last step to finish
ourLog.info("Setting last step latch");
@@ -482,6 +589,95 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
assertEquals(1.0, jobInstance.getProgress());
}
+ @Test
+ public void testJobWithLongPollingStep() throws InterruptedException {
+ // create job definition
+ int callsToMake = 3;
+ int chunksToAwait = 2;
+ String jobId = getMethodNameForJobId();
+
+ ConcurrentHashMap chunkToCounter = new ConcurrentHashMap<>();
+ HashMap chunkToCallsToMake = new HashMap<>();
+ IJobStepWorker first = (step, sink) -> {
+ for (int i = 0; i < chunksToAwait; i++) {
+ String cv = "chunk" + i;
+ chunkToCallsToMake.put(cv, callsToMake);
+ sink.accept(new FirstStepOutput().setValue(cv));
+ }
+ return RunOutcome.SUCCESS;
+ };
+
+ // step 2
+ IJobStepWorker second = (step, sink) -> {
+ // simulate a call
+ Awaitility.await().atMost(100, TimeUnit.MICROSECONDS);
+
+ // we use Batch2FastSchedulerConfig, so we have a fast scheduler
+ // that should catch and call repeatedly pretty quickly
+ String chunkValue = step.getData().myTestValue;
+ AtomicInteger pollCounter = chunkToCounter.computeIfAbsent(chunkValue, (key) -> {
+ return new AtomicInteger();
+ });
+ int count = pollCounter.getAndIncrement();
+
+ if (chunkToCallsToMake.get(chunkValue) <= count) {
+ sink.accept(new SecondStepOutput());
+ return RunOutcome.SUCCESS;
+ }
+ throw new RetryChunkLaterException(Duration.of(200, ChronoUnit.MILLIS));
+ };
+
+ // step 3
+ ILastJobStepWorker last = (step, sink) -> {
+ myLastStepLatch.call(1);
+ return RunOutcome.SUCCESS;
+ };
+
+ JobDefinition extends IModelJson> jd = JobDefinition.newBuilder()
+ .setJobDefinitionId(jobId)
+ .setJobDescription("test job")
+ .setJobDefinitionVersion(TEST_JOB_VERSION)
+ .setParametersType(TestJobParameters.class)
+ .gatedExecution()
+ .addFirstStep(
+ FIRST_STEP_ID,
+ "First step",
+ FirstStepOutput.class,
+ first
+ )
+ .addIntermediateStep(SECOND_STEP_ID,
+ "Second step",
+ SecondStepOutput.class,
+ second)
+ .addLastStep(
+ LAST_STEP_ID,
+ "Final step",
+ last
+ )
+ .completionHandler(myCompletionHandler)
+ .build();
+ myJobDefinitionRegistry.addJobDefinition(jd);
+
+ // test
+ JobInstanceStartRequest request = buildRequest(jobId);
+ myLastStepLatch.setExpectedCount(chunksToAwait);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
+ String instanceId = startResponse.getInstanceId();
+
+ // waiting for the job
+ myBatch2JobHelper.awaitJobCompletion(startResponse);
+ // ensure final step fired
+ myLastStepLatch.awaitExpected();
+
+ // verify
+ assertEquals(chunksToAwait, chunkToCounter.size());
+ for (Map.Entry set : chunkToCounter.entrySet()) {
+ // +1 because the counter is 0-indexed: each chunk makes callsToMake retried calls (counts 0..callsToMake-1)
+ // followed by one final successful call, for callsToMake + 1 calls in total
+ assertEquals(callsToMake + 1, set.getValue().get());
+ }
+ }
+
@Test
public void testFirstStepToSecondStep_doubleChunk_doesNotFastTrack() throws InterruptedException {
IJobStepWorker firstStep = (step, sink) -> {
@@ -491,7 +687,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
};
IJobStepWorker lastStep = (step, sink) -> callLatch(myLastStepLatch, step);
- String jobDefId = new Exception().getStackTrace()[0].getMethodName();
+ String jobDefId = getMethodNameForJobId();
JobDefinition extends IModelJson> definition = buildGatedJobDefinition(jobDefId, firstStep, lastStep);
myJobDefinitionRegistry.addJobDefinition(definition);
@@ -501,6 +697,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
myFirstStepLatch.setExpectedCount(1);
Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
String instanceId = startResponse.getInstanceId();
+ myBatch2JobHelper.runMaintenancePass();
myFirstStepLatch.awaitExpected();
myLastStepLatch.setExpectedCount(2);
@@ -513,14 +710,14 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
@Test
- public void JobExecutionFailedException_CausesInstanceFailure() {
+ public void jobExecutionFailedException_CausesInstanceFailure() {
// setup
IJobStepWorker firstStep = (step, sink) -> {
throw new JobExecutionFailedException("Expected Test Exception");
};
IJobStepWorker lastStep = (step, sink) -> fail();
- String jobDefId = new Exception().getStackTrace()[0].getMethodName();
+ String jobDefId = getMethodNameForJobId();
JobDefinition extends IModelJson> definition = buildGatedJobDefinition(jobDefId, firstStep, lastStep);
myJobDefinitionRegistry.addJobDefinition(definition);
@@ -538,36 +735,47 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
@Test
public void testUnknownException_KeepsInProgress_CanCancelManually() throws InterruptedException {
// setup
- IJobStepWorker firstStep = (step, sink) -> {
- callLatch(myFirstStepLatch, step);
- throw new RuntimeException("Expected Test Exception");
- };
- IJobStepWorker lastStep = (step, sink) -> fail();
- String jobDefId = new Exception().getStackTrace()[0].getMethodName();
- JobDefinition extends IModelJson> definition = buildGatedJobDefinition(jobDefId, firstStep, lastStep);
+ // we want to control the maintenance runner ourselves in this case
+ // to prevent intermittent test failures
+ myJobMaintenanceService.enableMaintenancePass(false);
- myJobDefinitionRegistry.addJobDefinition(definition);
+ try {
+ IJobStepWorker firstStep = (step, sink) -> {
+ callLatch(myFirstStepLatch, step);
+ throw new RuntimeException("Expected Test Exception");
+ };
+ IJobStepWorker lastStep = (step, sink) -> fail();
- JobInstanceStartRequest request = buildRequest(jobDefId);
+ String jobDefId = getMethodNameForJobId();
+ JobDefinition extends IModelJson> definition = buildGatedJobDefinition(jobDefId, firstStep, lastStep);
- // execute
- ourLog.info("Starting job");
- myFirstStepLatch.setExpectedCount(1);
- Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
- String instanceId = startResponse.getInstanceId();
- myFirstStepLatch.awaitExpected();
+ myJobDefinitionRegistry.addJobDefinition(definition);
- // validate
- myBatch2JobHelper.awaitJobInProgress(instanceId);
+ JobInstanceStartRequest request = buildRequest(jobDefId);
- // execute
- ourLog.info("Cancel job {}", instanceId);
- myJobCoordinator.cancelInstance(instanceId);
- ourLog.info("Cancel job {} done", instanceId);
+ // execute
+ ourLog.info("Starting job");
+ myFirstStepLatch.setExpectedCount(1);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
+ String instanceId = startResponse.getInstanceId();
+ myBatch2JobHelper.forceRunMaintenancePass();
+ myFirstStepLatch.awaitExpected();
- // validate
- myBatch2JobHelper.awaitJobCancelled(instanceId);
+ // validate
+ myBatch2JobHelper.awaitJobHasStatusWithForcedMaintenanceRuns(instanceId, StatusEnum.IN_PROGRESS);
+
+ // execute
+ ourLog.info("Cancel job {}", instanceId);
+ myJobCoordinator.cancelInstance(instanceId);
+ ourLog.info("Cancel job {} done", instanceId);
+
+ // validate
+ myBatch2JobHelper.awaitJobHasStatusWithForcedMaintenanceRuns(instanceId,
+ StatusEnum.CANCELLED);
+ } finally {
+ myJobMaintenanceService.enableMaintenancePass(true);
+ }
}
@Test
@@ -586,7 +794,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
return RunOutcome.SUCCESS;
};
// job definition
- String jobDefId = new Exception().getStackTrace()[0].getMethodName();
+ String jobDefId = getMethodNameForJobId();
JobDefinition extends IModelJson> jd = JobDefinition.newBuilder()
.setJobDefinitionId(jobDefId)
.setJobDescription("test job")
@@ -629,6 +837,15 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
return request;
}
+ /**
+ * Returns the method name of the calling method for a unique job id.
+ * It is best if this is called directly from the test method itself, and never
+ * delegated to a separate child method.
+ */
+ private String getMethodNameForJobId() {
+ return new Exception().getStackTrace()[1].getMethodName();
+ }
+
@Nonnull
private JobDefinition extends IModelJson> buildGatedJobDefinition(String theJobId, IJobStepWorker theFirstStep, IJobStepWorker theLastStep) {
return JobDefinition.newBuilder()
@@ -723,6 +940,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
)
.completionHandler(myCompletionHandler)
.build();
+ myJobDefinitionRegistry.removeJobDefinition(theJobId, 1);
myJobDefinitionRegistry.addJobDefinition(jd);
}
@@ -732,8 +950,16 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
}
static class FirstStepOutput implements IModelJson {
+ @JsonProperty("test")
+ private String myTestValue;
+
FirstStepOutput() {
}
+
+ public FirstStepOutput setValue(String theV) {
+ myTestValue = theV;
+ return this;
+ }
}
static class SecondStepOutput implements IModelJson {
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobInstanceRepositoryTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobInstanceRepositoryTest.java
index 1605ada7de8..01e0ab868f9 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobInstanceRepositoryTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobInstanceRepositoryTest.java
@@ -1,12 +1,10 @@
package ca.uhn.fhir.jpa.batch2;
import ca.uhn.fhir.batch2.model.StatusEnum;
-import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
-import org.springframework.beans.factory.annotation.Autowired;
import java.util.Arrays;
import java.util.Date;
@@ -18,9 +16,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
public class Batch2JobInstanceRepositoryTest extends BaseJpaR4Test {
- @Autowired
- IBatch2JobInstanceRepository myBatch2JobInstanceRepository;
-
@ParameterizedTest
@CsvSource({
"QUEUED, FAILED, QUEUED, true, normal transition",
@@ -38,16 +33,16 @@ public class Batch2JobInstanceRepositoryTest extends BaseJpaR4Test {
entity.setStatus(theCurrentState);
entity.setCreateTime(new Date());
entity.setDefinitionId("definition_id");
- myBatch2JobInstanceRepository.save(entity);
+ myJobInstanceRepository.save(entity);
// when
int changeCount =
runInTransaction(()->
- myBatch2JobInstanceRepository.updateInstanceStatusIfIn(jobId, theTargetState, theAllowedPriorStates));
+ myJobInstanceRepository.updateInstanceStatusIfIn(jobId, theTargetState, theAllowedPriorStates));
// then
Batch2JobInstanceEntity readBack = runInTransaction(() ->
- myBatch2JobInstanceRepository.findById(jobId).orElseThrow());
+ myJobInstanceRepository.findById(jobId).orElseThrow());
if (theExpectedSuccessFlag) {
assertEquals(1, changeCount, "The change happened");
assertEquals(theTargetState, readBack.getStatus());
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceDatabaseIT.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceDatabaseIT.java
index 8030e908f5a..f5143b9ffbc 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceDatabaseIT.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceDatabaseIT.java
@@ -27,6 +27,7 @@ import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.test.concurrency.IPointcutLatch;
import ca.uhn.test.concurrency.PointcutLatch;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
@@ -39,7 +40,6 @@ import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.ChannelInterceptor;
import org.springframework.transaction.support.TransactionTemplate;
-import jakarta.annotation.Nonnull;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
@@ -358,10 +358,10 @@ public class Batch2JobMaintenanceDatabaseIT extends BaseJpaR4Test {
WorkChunkExpectation expectation = new WorkChunkExpectation(
"""
-chunk1, FIRST, COMPLETED
-chunk2, SECOND, QUEUED
-chunk3, LAST, QUEUED
-""",
+ chunk1, FIRST, COMPLETED
+ chunk2, SECOND, QUEUED
+ chunk3, LAST, QUEUED
+ """,
""
);
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceIT.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceIT.java
index 6f989437e6e..c39c72f609c 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceIT.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/Batch2JobMaintenanceIT.java
@@ -11,17 +11,26 @@ import ca.uhn.fhir.batch2.api.VoidModel;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.maintenance.JobMaintenanceServiceImpl;
import ca.uhn.fhir.batch2.model.JobDefinition;
+import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobInstanceStartRequest;
import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
+import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.jpa.subscription.channel.api.ChannelConsumerSettings;
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelFactory;
import ca.uhn.fhir.jpa.subscription.channel.impl.LinkedBlockingChannel;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import ca.uhn.fhir.jpa.test.Batch2JobHelper;
+import ca.uhn.fhir.jpa.test.config.Batch2FastSchedulerConfig;
import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
import ca.uhn.fhir.test.utilities.UnregisterScheduledProcessor;
+import ca.uhn.fhir.testjob.TestJobDefinitionUtils;
+import ca.uhn.fhir.testjob.models.FirstStepOutput;
+import ca.uhn.fhir.testjob.models.ReductionStepOutput;
+import ca.uhn.fhir.testjob.models.TestJobParameters;
import ca.uhn.test.concurrency.PointcutLatch;
import com.fasterxml.jackson.annotation.JsonProperty;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -31,8 +40,6 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
-import jakarta.annotation.Nonnull;
-import jakarta.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
@@ -41,9 +48,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* The on-enter actions are defined in
- * {@link ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater#handleStatusChange}
+ * {@link ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater#handleStatusChange(JobInstance)}}
* {@link ca.uhn.fhir.batch2.progress.InstanceProgress#updateStatus(JobInstance)}
- * {@link JobInstanceProcessor#cleanupInstance()}
+ * {@link ca.uhn.fhir.batch2.maintenance.JobInstanceProcessor#cleanupInstance()}
* For chunks:
* {@link ca.uhn.fhir.jpa.batch2.JpaJobPersistenceImpl#onWorkChunkCreate}
@@ -53,13 +60,10 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
@TestPropertySource(properties = {
UnregisterScheduledProcessor.SCHEDULING_DISABLED_EQUALS_FALSE
})
-@ContextConfiguration(classes = {Batch2JobMaintenanceIT.SpringConfig.class})
+@ContextConfiguration(classes = {Batch2FastSchedulerConfig.class})
public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
private static final Logger ourLog = LoggerFactory.getLogger(Batch2JobMaintenanceIT.class);
- public static final int TEST_JOB_VERSION = 1;
- public static final String FIRST_STEP_ID = "first-step";
- public static final String LAST_STEP_ID = "last-step";
@Autowired
JobDefinitionRegistry myJobDefinitionRegistry;
@Autowired
@@ -87,6 +91,7 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
@BeforeEach
public void before() {
+ myStorageSettings.setJobFastTrackingEnabled(true);
myCompletionHandler = details -> {};
myWorkChannel = (LinkedBlockingChannel) myChannelFactory.getOrCreateReceiver(CHANNEL_NAME, JobWorkNotificationJsonMessage.class, new ChannelConsumerSettings());
JobMaintenanceServiceImpl jobMaintenanceService = (JobMaintenanceServiceImpl) myJobMaintenanceService;
@@ -99,7 +104,6 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
@AfterEach
public void after() {
myWorkChannel.clearInterceptorsForUnitTest();
- myStorageSettings.setJobFastTrackingEnabled(true);
JobMaintenanceServiceImpl jobMaintenanceService = (JobMaintenanceServiceImpl) myJobMaintenanceService;
jobMaintenanceService.setMaintenanceJobStartedCallback(() -> {});
}
@@ -122,7 +126,8 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
myFirstStepLatch.setExpectedCount(1);
myLastStepLatch.setExpectedCount(1);
- String batchJobId = myJobCoordinator.startInstance(request).getInstanceId();
+ String batchJobId = myJobCoordinator.startInstance(new SystemRequestDetails(), request).getInstanceId();
+
myFirstStepLatch.awaitExpected();
myBatch2JobHelper.assertFastTracking(batchJobId);
@@ -156,12 +161,12 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
public void testFirstStepToSecondStepFasttrackingDisabled_singleChunkDoesNotFasttrack() throws InterruptedException {
myStorageSettings.setJobFastTrackingEnabled(false);
- IJobStepWorker firstStep = (step, sink) -> {
- sink.accept(new Batch2JobMaintenanceIT.FirstStepOutput());
+ IJobStepWorker firstStep = (step, sink) -> {
+ sink.accept(new FirstStepOutput());
callLatch(myFirstStepLatch, step);
return RunOutcome.SUCCESS;
};
- IJobStepWorker lastStep = (step, sink) -> callLatch(myLastStepLatch, step);
+ IJobStepWorker lastStep = (step, sink) -> callLatch(myLastStepLatch, step);
String jobDefId = new Exception().getStackTrace()[0].getMethodName();
@@ -173,7 +178,7 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
myFirstStepLatch.setExpectedCount(1);
myLastStepLatch.setExpectedCount(1);
- String batchJobId = myJobCoordinator.startInstance(request).getInstanceId();
+ String batchJobId = myJobCoordinator.startInstance(new SystemRequestDetails(), request).getInstanceId();
myFirstStepLatch.awaitExpected();
myBatch2JobHelper.assertFastTracking(batchJobId);
@@ -200,65 +205,20 @@ public class Batch2JobMaintenanceIT extends BaseJpaR4Test {
@Nonnull
private JobDefinition extends IModelJson> buildGatedJobDefinition(String theJobId, IJobStepWorker theFirstStep, IJobStepWorker theLastStep) {
- return JobDefinition.newBuilder()
- .setJobDefinitionId(theJobId)
- .setJobDescription("test job")
- .setJobDefinitionVersion(TEST_JOB_VERSION)
- .setParametersType(TestJobParameters.class)
- .gatedExecution()
- .addFirstStep(
- FIRST_STEP_ID,
- "Test first step",
- FirstStepOutput.class,
- theFirstStep
- )
- .addLastStep(
- LAST_STEP_ID,
- "Test last step",
- theLastStep
- )
- .completionHandler(myCompletionHandler)
- .build();
+ return TestJobDefinitionUtils.buildGatedJobDefinition(
+ theJobId,
+ theFirstStep,
+ theLastStep,
+ myCompletionHandler
+ );
}
- static class TestJobParameters implements IModelJson {
- TestJobParameters() {
- }
- }
-
- static class FirstStepOutput implements IModelJson {
- FirstStepOutput() {
- }
- }
-
- static class SecondStepOutput implements IModelJson {
- @JsonProperty("test")
- private String myTestValue;
-
- SecondStepOutput() {
- }
-
- public void setValue(String theV) {
- myTestValue = theV;
- }
- }
-
- static class ReductionStepOutput implements IModelJson {
+ static class OurReductionStepOutput extends ReductionStepOutput {
@JsonProperty("result")
private List> myResult;
- ReductionStepOutput(List> theResult) {
+ OurReductionStepOutput(List> theResult) {
myResult = theResult;
}
}
-
- static class SpringConfig {
- @Autowired
- IJobMaintenanceService myJobMaintenanceService;
-
- @PostConstruct
- void fastScheduler() {
- ((JobMaintenanceServiceImpl)myJobMaintenanceService).setScheduledJobFrequencyMillis(200);
- }
- }
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/BulkDataErrorAbuseTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/BulkDataErrorAbuseTest.java
index b8b16b4670f..e4fded6cc3e 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/BulkDataErrorAbuseTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/BulkDataErrorAbuseTest.java
@@ -1,7 +1,6 @@
package ca.uhn.fhir.jpa.batch2;
import ca.uhn.fhir.batch2.api.IJobCoordinator;
-import ca.uhn.fhir.rest.api.server.bulk.BulkExportJobParameters;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobInstanceStartRequest;
import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
@@ -10,10 +9,13 @@ import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
import ca.uhn.fhir.jpa.test.config.TestR4Config;
import ca.uhn.fhir.rest.api.Constants;
+import ca.uhn.fhir.rest.api.server.bulk.BulkExportJobParameters;
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
import ca.uhn.fhir.util.Batch2JobDefinitionConstants;
import ca.uhn.fhir.util.JsonUtil;
import com.google.common.collect.Sets;
+import org.hl7.fhir.instance.model.api.IBaseResource;
+import org.hl7.fhir.instance.model.api.IIdType;
import org.hl7.fhir.r4.model.Binary;
import org.hl7.fhir.r4.model.Enumerations;
import org.hl7.fhir.r4.model.Group;
@@ -36,13 +38,16 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.emptyOrNullString;
import static org.hamcrest.Matchers.equalTo;
@@ -64,6 +69,7 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
@BeforeEach
void beforeEach() {
+ ourLog.info("BulkDataErrorAbuseTest.beforeEach");
afterPurgeDatabase();
}
@@ -93,7 +99,7 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
duAbuseTest(Integer.MAX_VALUE);
}
- private void duAbuseTest(int taskExecutions) throws InterruptedException, ExecutionException {
+ private void duAbuseTest(int taskExecutions) {
// Create some resources
Patient patient = new Patient();
patient.setId("PING1");
@@ -133,18 +139,19 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
ExecutorService executorService = new ThreadPoolExecutor(workerCount, workerCount,
0L, TimeUnit.MILLISECONDS,
workQueue);
+ CompletionService completionService = new ExecutorCompletionService<>(executorService);
ourLog.info("Starting task creation");
- List> futures = new ArrayList<>();
+ int maxFuturesToProcess = 500;
for (int i = 0; i < taskExecutions; i++) {
- futures.add(executorService.submit(() -> {
+ completionService.submit(() -> {
String instanceId = null;
try {
instanceId = startJob(options);
// Run a scheduled pass to build the export
- myBatch2JobHelper.awaitJobCompletion(instanceId, 60);
+ myBatch2JobHelper.awaitJobCompletion(instanceId, 10);
verifyBulkExportResults(instanceId, List.of("Patient/PING1", "Patient/PING2"), Collections.singletonList("Patient/PNING3"));
@@ -153,14 +160,11 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
ourLog.error("Caught an error during processing instance {}", instanceId, theError);
throw new InternalErrorException("Caught an error during processing instance " + instanceId, theError);
}
- }));
+ });
// Don't let the list of futures grow so big we run out of memory
- if (futures.size() > 1000) {
- while (futures.size() > 500) {
- // This should always return true, but it'll throw an exception if we failed
- assertTrue(futures.remove(0).get());
- }
+ if (i != 0 && i % maxFuturesToProcess == 0) {
+ executeFutures(completionService, maxFuturesToProcess);
}
}
@@ -168,18 +172,53 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
// wait for completion to avoid stranding background tasks.
executorService.shutdown();
- assertTrue(executorService.awaitTermination(60, TimeUnit.SECONDS), "Finished before timeout");
+ await()
+ .atMost(60, TimeUnit.SECONDS)
+ .until(() -> {
+ return executorService.isTerminated() && executorService.isShutdown();
+ });
// verify that all requests succeeded
ourLog.info("All tasks complete. Verify results.");
- for (var next : futures) {
- // This should always return true, but it'll throw an exception if we failed
- assertTrue(next.get());
- }
+ executeFutures(completionService, taskExecutions % maxFuturesToProcess);
+
+ executorService.shutdown();
+ await()
+ .atMost(60, TimeUnit.SECONDS)
+ .until(() -> {
+ return executorService.isTerminated() && executorService.isShutdown();
+ });
ourLog.info("Finished task execution");
}
+ private void executeFutures(CompletionService theCompletionService, int theTotal) {
+ List errors = new ArrayList<>();
+ int count = 0;
+
+ while (count + errors.size() < theTotal) {
+ try {
+ Future future = theCompletionService.take();
+ boolean r = future.get();
+ assertTrue(r);
+ count++;
+ } catch (Exception ex) {
+ // we will run all the threads to completion, even if we have errors;
+ // this is so we don't have background threads kicking around with
+ // partial changes.
+ // we either do this, or shutdown the completion service in an
+ // "inelegant" manner, dropping all threads (which we aren't doing)
+ ourLog.error("Failed after checking " + count + " futures");
+ errors.add(ex.getMessage());
+ }
+ }
+
+ if (!errors.isEmpty()) {
+ fail(String.format("Failed to execute futures. Found %d errors :\n", errors.size())
+ + String.join(", ", errors));
+ }
+ }
+
private void verifyBulkExportResults(String theInstanceId, List theContainedList, List theExcludedList) {
// Iterate over the files
@@ -196,7 +235,6 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
String resourceType = file.getKey();
List binaryIds = file.getValue();
for (var nextBinaryId : binaryIds) {
-
Binary binary = myBinaryDao.read(new IdType(nextBinaryId), mySrd);
assertEquals(Constants.CT_FHIR_NDJSON, binary.getContentType());
@@ -207,18 +245,17 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
.lines().toList();
ourLog.debug("Export job {} file {} line-count: {}", theInstanceId, nextBinaryId, lines.size());
- lines.stream()
- .map(line -> myFhirContext.newJsonParser().parseResource(line))
- .map(r -> r.getIdElement().toUnqualifiedVersionless())
- .forEach(nextId -> {
- if (!resourceType.equals(nextId.getResourceType())) {
- fail("Found resource of type " + nextId.getResourceType() + " in file for type " + resourceType);
- } else {
- if (!foundIds.add(nextId.getValue())) {
- fail("Found duplicate ID: " + nextId.getValue());
- }
+ for (String line : lines) {
+ IBaseResource resource = myFhirContext.newJsonParser().parseResource(line);
+ IIdType nextId = resource.getIdElement().toUnqualifiedVersionless();
+ if (!resourceType.equals(nextId.getResourceType())) {
+ fail("Found resource of type " + nextId.getResourceType() + " in file for type " + resourceType);
+ } else {
+ if (!foundIds.add(nextId.getValue())) {
+ fail("Found duplicate ID: " + nextId.getValue());
}
- });
+ }
+ }
}
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JobInstanceRepositoryTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JobInstanceRepositoryTest.java
index 2485daacc91..cd95d6faf27 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JobInstanceRepositoryTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JobInstanceRepositoryTest.java
@@ -4,7 +4,6 @@ import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.model.FetchJobInstancesRequest;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
-import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import org.junit.jupiter.api.AfterEach;
@@ -23,8 +22,6 @@ import static org.hamcrest.Matchers.hasSize;
public class JobInstanceRepositoryTest extends BaseJpaR4Test {
- @Autowired
- private IBatch2JobInstanceRepository myJobInstanceRepository;
@Autowired
private IJobPersistence myJobPersistenceSvc;
private static final String PARAMS = "{\"param1\":\"value1\"}";
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
index 90654ff9bc1..fa492384b37 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/batch2/JpaJobPersistenceImplTest.java
@@ -1,9 +1,15 @@
package ca.uhn.fhir.jpa.batch2;
+import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobOperationResultJson;
+import ca.uhn.fhir.batch2.api.RunOutcome;
+import ca.uhn.fhir.batch2.channel.BatchJobSender;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.jobs.imprt.NdJsonFileJson;
+import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.JobWorkNotification;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkCompletionEvent;
@@ -18,26 +24,34 @@ import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
+import ca.uhn.fhir.jpa.test.Batch2JobHelper;
+import ca.uhn.fhir.jpa.test.config.Batch2FastSchedulerConfig;
+import ca.uhn.fhir.testjob.TestJobDefinitionUtils;
+import ca.uhn.fhir.testjob.models.FirstStepOutput;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.hapi.fhir.batch2.test.AbstractIJobPersistenceSpecificationTest;
import ca.uhn.hapi.fhir.batch2.test.configs.SpyOverrideConfig;
+import ca.uhn.test.concurrency.PointcutLatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
+import jakarta.annotation.Nonnull;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.CsvSource;
import org.junit.jupiter.params.provider.MethodSource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Import;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Sort;
+import org.springframework.test.context.ContextConfiguration;
import org.springframework.transaction.PlatformTransactionManager;
-import jakarta.annotation.Nonnull;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
@@ -60,15 +74,25 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.clearInvocations;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
@TestMethodOrder(MethodOrderer.MethodName.class)
+@ContextConfiguration(classes = {
+ Batch2FastSchedulerConfig.class
+})
@Import(SpyOverrideConfig.class)
public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
public static final String JOB_DEFINITION_ID = "definition-id";
- public static final String TARGET_STEP_ID = "step-id";
+ public static final String FIRST_STEP_ID = TestJobDefinitionUtils.FIRST_STEP_ID;
+ public static final String LAST_STEP_ID = TestJobDefinitionUtils.LAST_STEP_ID;
public static final String DEF_CHUNK_ID = "definition-chunkId";
- public static final String STEP_CHUNK_ID = "step-chunkId";
+ public static final String STEP_CHUNK_ID = TestJobDefinitionUtils.FIRST_STEP_ID;
public static final int JOB_DEF_VER = 1;
public static final int SEQUENCE_NUMBER = 1;
public static final String CHUNK_DATA = "{\"key\":\"value\"}";
@@ -80,6 +104,25 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
@Autowired
private IBatch2JobInstanceRepository myJobInstanceRepository;
+ @Autowired
+ public Batch2JobHelper myBatch2JobHelper;
+
+ // this is our spy
+ @Autowired
+ private BatchJobSender myBatchSender;
+
+ @Autowired
+ private IJobMaintenanceService myMaintenanceService;
+
+ @Autowired
+ public JobDefinitionRegistry myJobDefinitionRegistry;
+
+ @AfterEach
+ public void after() {
+ myJobDefinitionRegistry.removeJobDefinition(JOB_DEFINITION_ID, JOB_DEF_VER);
+ myMaintenanceService.enableMaintenancePass(true);
+ }
+
@Test
public void testDeleteInstance() {
// Setup
@@ -87,7 +130,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
JobInstance instance = createInstance();
String instanceId = mySvc.storeNewInstance(instance);
for (int i = 0; i < 10; i++) {
- storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, i, JsonUtil.serialize(new NdJsonFileJson().setNdJsonText("{}")));
+ storeWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, i, JsonUtil.serialize(new NdJsonFileJson().setNdJsonText("{}")), false);
}
// Execute
@@ -102,8 +145,13 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
});
}
- private String storeWorkChunk(String theJobDefinitionId, String theTargetStepId, String theInstanceId, int theSequence, String theSerializedData) {
- WorkChunkCreateEvent batchWorkChunk = new WorkChunkCreateEvent(theJobDefinitionId, JOB_DEF_VER, theTargetStepId, theInstanceId, theSequence, theSerializedData);
+ private String storeWorkChunk(String theJobDefinitionId, String theTargetStepId, String theInstanceId, int theSequence, String theSerializedData, boolean theGatedExecution) {
+ WorkChunkCreateEvent batchWorkChunk = new WorkChunkCreateEvent(theJobDefinitionId, TestJobDefinitionUtils.TEST_JOB_VERSION, theTargetStepId, theInstanceId, theSequence, theSerializedData, theGatedExecution);
+ return mySvc.onWorkChunkCreate(batchWorkChunk);
+ }
+
+ private String storeFirstWorkChunk(String theJobDefinitionId, String theTargetStepId, String theInstanceId, int theSequence, String theSerializedData) {
+ WorkChunkCreateEvent batchWorkChunk = new WorkChunkCreateEvent(theJobDefinitionId, TestJobDefinitionUtils.TEST_JOB_VERSION, theTargetStepId, theInstanceId, theSequence, theSerializedData, false);
return mySvc.onWorkChunkCreate(batchWorkChunk);
}
@@ -113,7 +161,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
String instanceId = mySvc.storeNewInstance(instance);
runInTransaction(() -> {
- Batch2JobInstanceEntity instanceEntity = myJobInstanceRepository.findById(instanceId).orElseThrow(IllegalStateException::new);
+ Batch2JobInstanceEntity instanceEntity = findInstanceByIdOrThrow(instanceId);
assertEquals(StatusEnum.QUEUED, instanceEntity.getStatus());
});
@@ -126,7 +174,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertEquals(instance.getReport(), foundInstance.getReport());
runInTransaction(() -> {
- Batch2JobInstanceEntity instanceEntity = myJobInstanceRepository.findById(instanceId).orElseThrow(IllegalStateException::new);
+ Batch2JobInstanceEntity instanceEntity = findInstanceByIdOrThrow(instanceId);
assertEquals(StatusEnum.QUEUED, instanceEntity.getStatus());
});
}
@@ -213,12 +261,14 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
@ParameterizedTest
@MethodSource("provideStatuses")
- public void testStartChunkOnlyWorksOnValidChunks(WorkChunkStatusEnum theStatus, boolean theShouldBeStartedByConsumer) {
+ public void testStartChunkOnlyWorksOnValidChunks(WorkChunkStatusEnum theStatus, boolean theShouldBeStartedByConsumer) throws InterruptedException {
// Setup
JobInstance instance = createInstance();
+ myMaintenanceService.enableMaintenancePass(false);
String instanceId = mySvc.storeNewInstance(instance);
- storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, CHUNK_DATA);
- WorkChunkCreateEvent batchWorkChunk = new WorkChunkCreateEvent(JOB_DEFINITION_ID, JOB_DEF_VER, TARGET_STEP_ID, instanceId, 0, CHUNK_DATA);
+
+ storeWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 0, CHUNK_DATA, false);
+ WorkChunkCreateEvent batchWorkChunk = new WorkChunkCreateEvent(JOB_DEFINITION_ID, JOB_DEF_VER, FIRST_STEP_ID, instanceId, 0, CHUNK_DATA, false);
String chunkId = mySvc.onWorkChunkCreate(batchWorkChunk);
Optional byId = myWorkChunkRepository.findById(chunkId);
Batch2WorkChunkEntity entity = byId.get();
@@ -230,7 +280,9 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
// Verify
boolean chunkStarted = workChunk.isPresent();
- assertEquals(chunkStarted, theShouldBeStartedByConsumer);
+ assertEquals(theShouldBeStartedByConsumer, chunkStarted);
+ verify(myBatchSender, never())
+ .sendWorkChannelMessage(any());
}
@Test
@@ -344,46 +396,185 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
@Test
public void testUpdateTime() {
// Setup
- JobInstance instance = createInstance();
+ boolean isGatedExecution = false;
+ JobInstance instance = createInstance(true, isGatedExecution);
String instanceId = mySvc.storeNewInstance(instance);
- Date updateTime = runInTransaction(() -> new Date(myJobInstanceRepository.findById(instanceId).orElseThrow().getUpdateTime().getTime()));
+ Date updateTime = runInTransaction(() -> new Date(findInstanceByIdOrThrow(instanceId).getUpdateTime().getTime()));
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
// Test
runInTransaction(() -> mySvc.updateInstanceUpdateTime(instanceId));
// Verify
- Date updateTime2 = runInTransaction(() -> new Date(myJobInstanceRepository.findById(instanceId).orElseThrow().getUpdateTime().getTime()));
+ Date updateTime2 = runInTransaction(() -> new Date(findInstanceByIdOrThrow(instanceId).getUpdateTime().getTime()));
assertNotEquals(updateTime, updateTime2);
}
+ @Test
+ public void advanceJobStepAndUpdateChunkStatus_forGatedJobWithoutReduction_updatesCurrentStepAndChunkStatus() {
+ // setup
+ boolean isGatedExecution = true;
+ JobInstance instance = createInstance(true, isGatedExecution);
+ String instanceId = mySvc.storeNewInstance(instance);
+ String chunkIdSecondStep1 = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, null, isGatedExecution);
+ String chunkIdSecondStep2 = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, null, isGatedExecution);
+
+ runInTransaction(() -> assertEquals(FIRST_STEP_ID, findInstanceByIdOrThrow(instanceId).getCurrentGatedStepId()));
+
+ // execute
+ runInTransaction(() -> {
+ boolean changed = mySvc.advanceJobStepAndUpdateChunkStatus(instanceId, LAST_STEP_ID, false);
+ assertTrue(changed);
+ });
+
+ // verify
+ runInTransaction(() -> {
+ assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(chunkIdSecondStep1).getStatus());
+ assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(chunkIdSecondStep2).getStatus());
+ assertEquals(LAST_STEP_ID, findInstanceByIdOrThrow(instanceId).getCurrentGatedStepId());
+ });
+ }
+
+ @Test
+ public void advanceJobStepAndUpdateChunkStatus_whenAlreadyInTargetStep_DoesNotUpdateStepOrChunks() {
+ // setup
+ boolean isGatedExecution = true;
+ JobInstance instance = createInstance(true, isGatedExecution);
+ String instanceId = mySvc.storeNewInstance(instance);
+ String chunkIdSecondStep1 = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, null, isGatedExecution);
+ String chunkIdSecondStep2 = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, null, isGatedExecution);
+
+ runInTransaction(() -> assertEquals(FIRST_STEP_ID, findInstanceByIdOrThrow(instanceId).getCurrentGatedStepId()));
+
+ // execute
+ runInTransaction(() -> {
+ boolean changed = mySvc.advanceJobStepAndUpdateChunkStatus(instanceId, FIRST_STEP_ID, false);
+ assertFalse(changed);
+ });
+
+ // verify
+ runInTransaction(() -> {
+ assertEquals(WorkChunkStatusEnum.GATE_WAITING, findChunkByIdOrThrow(chunkIdSecondStep1).getStatus());
+ assertEquals(WorkChunkStatusEnum.GATE_WAITING, findChunkByIdOrThrow(chunkIdSecondStep2).getStatus());
+ assertEquals(FIRST_STEP_ID, findInstanceByIdOrThrow(instanceId).getCurrentGatedStepId());
+ });
+ }
+
@Test
public void testFetchUnknownWork() {
assertFalse(myWorkChunkRepository.findById("FOO").isPresent());
}
- @Test
- public void testStoreAndFetchWorkChunk_NoData() {
- JobInstance instance = createInstance();
+ @ParameterizedTest
+ @CsvSource({
+ "false, READY, QUEUED",
+ "true, GATE_WAITING, QUEUED"
+ })
+ public void testStoreAndFetchWorkChunk_withOrWithoutGatedExecutionNoData_createdAndTransitionToExpectedStatus(boolean theGatedExecution, WorkChunkStatusEnum theExpectedStatusOnCreate, WorkChunkStatusEnum theExpectedStatusAfterTransition) throws InterruptedException {
+ // setup
+ JobInstance instance = createInstance(true, theGatedExecution);
+
+ // when
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(1);
+ myMaintenanceService.enableMaintenancePass(false);
String instanceId = mySvc.storeNewInstance(instance);
- String id = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, null);
+ // execute & verify
+ String firstChunkId = storeFirstWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 0, null);
+ // mark the first chunk as COMPLETED to allow step advance
+ runInTransaction(() -> myWorkChunkRepository.updateChunkStatus(firstChunkId, WorkChunkStatusEnum.READY, WorkChunkStatusEnum.COMPLETED));
+
+ String id = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, null, theGatedExecution);
+ runInTransaction(() -> assertEquals(theExpectedStatusOnCreate, findChunkByIdOrThrow(id).getStatus()));
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> assertEquals(theExpectedStatusAfterTransition, findChunkByIdOrThrow(id).getStatus()));
WorkChunk chunk = mySvc.onWorkChunkDequeue(id).orElseThrow(IllegalArgumentException::new);
+ // assert null since we did not input any data when creating the chunks
assertNull(chunk.getData());
+
+ latch.awaitExpected();
+ verify(myBatchSender).sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
+ }
+
+ @Test
+ public void testStoreAndFetchWorkChunk_withGatedJobMultipleChunk_correctTransitions() throws InterruptedException {
+ // setup
+ boolean isGatedExecution = true;
+ String expectedFirstChunkData = "IAmChunk1";
+ String expectedSecondChunkData = "IAmChunk2";
+ JobInstance instance = createInstance(true, isGatedExecution);
+ myMaintenanceService.enableMaintenancePass(false);
+ String instanceId = mySvc.storeNewInstance(instance);
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(2);
+
+ // execute & verify
+ String firstChunkId = storeFirstWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 0, expectedFirstChunkData);
+ String secondChunkId = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, expectedSecondChunkData, isGatedExecution);
+
+ runInTransaction(() -> {
+ // check chunks created in expected states
+ assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(firstChunkId).getStatus());
+ assertEquals(WorkChunkStatusEnum.GATE_WAITING, findChunkByIdOrThrow(secondChunkId).getStatus());
+ });
+
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> {
+ assertEquals(WorkChunkStatusEnum.QUEUED, findChunkByIdOrThrow(firstChunkId).getStatus());
+ // maintenance should not affect chunks in step 2
+ assertEquals(WorkChunkStatusEnum.GATE_WAITING, findChunkByIdOrThrow(secondChunkId).getStatus());
+ });
+
+ WorkChunk actualFirstChunkData = mySvc.onWorkChunkDequeue(firstChunkId).orElseThrow(IllegalArgumentException::new);
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, findChunkByIdOrThrow(firstChunkId).getStatus()));
+ assertEquals(expectedFirstChunkData, actualFirstChunkData.getData());
+
+ mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(firstChunkId, 50, 0));
+ runInTransaction(() -> {
+ assertEquals(WorkChunkStatusEnum.COMPLETED, findChunkByIdOrThrow(firstChunkId).getStatus());
+ assertEquals(WorkChunkStatusEnum.GATE_WAITING, findChunkByIdOrThrow(secondChunkId).getStatus());
+ });
+
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> {
+ assertEquals(WorkChunkStatusEnum.COMPLETED, findChunkByIdOrThrow(firstChunkId).getStatus());
+			// now that all chunks for step 1 are COMPLETED, should enqueue chunks in step 2
+ assertEquals(WorkChunkStatusEnum.QUEUED, findChunkByIdOrThrow(secondChunkId).getStatus());
+ });
+
+ WorkChunk actualSecondChunkData = mySvc.onWorkChunkDequeue(secondChunkId).orElseThrow(IllegalArgumentException::new);
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, findChunkByIdOrThrow(secondChunkId).getStatus()));
+ assertEquals(expectedSecondChunkData, actualSecondChunkData.getData());
+
+ latch.awaitExpected();
+ verify(myBatchSender, times(2))
+ .sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
}
@Test
void testStoreAndFetchChunksForInstance_NoData() {
// given
+ boolean isGatedExecution = false;
JobInstance instance = createInstance();
String instanceId = mySvc.storeNewInstance(instance);
- String queuedId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, "some data");
- String erroredId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 1, "some more data");
- String completedId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 2, "some more data");
+ String queuedId = storeWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 0, "some data", isGatedExecution);
+ String erroredId = storeWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 1, "some more data", isGatedExecution);
+ String completedId = storeWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 2, "some more data", isGatedExecution);
mySvc.onWorkChunkDequeue(erroredId);
WorkChunkErrorEvent parameters = new WorkChunkErrorEvent(erroredId, "Our error message");
@@ -407,9 +598,9 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertEquals(JOB_DEFINITION_ID, workChunk.getJobDefinitionId());
assertEquals(JOB_DEF_VER, workChunk.getJobDefinitionVersion());
assertEquals(instanceId, workChunk.getInstanceId());
- assertEquals(TARGET_STEP_ID, workChunk.getTargetStepId());
+ assertEquals(FIRST_STEP_ID, workChunk.getTargetStepId());
assertEquals(0, workChunk.getSequence());
- assertEquals(WorkChunkStatusEnum.QUEUED, workChunk.getStatus());
+ assertEquals(WorkChunkStatusEnum.READY, workChunk.getStatus());
assertNotNull(workChunk.getCreateTime());
@@ -418,7 +609,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertNull(workChunk.getEndTime());
assertNull(workChunk.getErrorMessage());
assertEquals(0, workChunk.getErrorCount());
- assertEquals(null, workChunk.getRecordsProcessed());
+ assertNull(workChunk.getRecordsProcessed());
}
{
@@ -426,7 +617,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertEquals(WorkChunkStatusEnum.ERRORED, workChunk1.getStatus());
assertEquals("Our error message", workChunk1.getErrorMessage());
assertEquals(1, workChunk1.getErrorCount());
- assertEquals(null, workChunk1.getRecordsProcessed());
+ assertNull(workChunk1.getRecordsProcessed());
assertNotNull(workChunk1.getEndTime());
}
@@ -438,18 +629,35 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertNull(workChunk2.getErrorMessage());
assertEquals(0, workChunk2.getErrorCount());
}
-
}
-
- @Test
- public void testStoreAndFetchWorkChunk_WithData() {
- JobInstance instance = createInstance();
+ @ParameterizedTest
+ @CsvSource({
+ "false, READY, QUEUED",
+ "true, GATE_WAITING, QUEUED"
+ })
+ public void testStoreAndFetchWorkChunk_withOrWithoutGatedExecutionwithData_createdAndTransitionToExpectedStatus(boolean theGatedExecution, WorkChunkStatusEnum theExpectedCreatedStatus, WorkChunkStatusEnum theExpectedTransitionStatus) throws InterruptedException {
+ // setup
+ JobInstance instance = createInstance(true, theGatedExecution);
+ myMaintenanceService.enableMaintenancePass(false);
String instanceId = mySvc.storeNewInstance(instance);
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(1);
- String id = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, CHUNK_DATA);
+ // execute & verify
+ String firstChunkId = storeFirstWorkChunk(JOB_DEFINITION_ID, FIRST_STEP_ID, instanceId, 0, null);
+ // mark the first chunk as COMPLETED to allow step advance
+ runInTransaction(() -> myWorkChunkRepository.updateChunkStatus(firstChunkId, WorkChunkStatusEnum.READY, WorkChunkStatusEnum.COMPLETED));
+
+ String id = storeWorkChunk(JOB_DEFINITION_ID, LAST_STEP_ID, instanceId, 0, CHUNK_DATA, theGatedExecution);
assertNotNull(id);
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, myWorkChunkRepository.findById(id).orElseThrow(IllegalArgumentException::new).getStatus()));
+ runInTransaction(() -> assertEquals(theExpectedCreatedStatus, findChunkByIdOrThrow(id).getStatus()));
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> assertEquals(theExpectedTransitionStatus, findChunkByIdOrThrow(id).getStatus()));
WorkChunk chunk = mySvc.onWorkChunkDequeue(id).orElseThrow(IllegalArgumentException::new);
assertEquals(36, chunk.getInstanceId().length());
@@ -458,19 +666,30 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertEquals(WorkChunkStatusEnum.IN_PROGRESS, chunk.getStatus());
assertEquals(CHUNK_DATA, chunk.getData());
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, myWorkChunkRepository.findById(id).orElseThrow(IllegalArgumentException::new).getStatus()));
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, findChunkByIdOrThrow(id).getStatus()));
+ latch.awaitExpected();
+ verify(myBatchSender).sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
}
@Test
- public void testMarkChunkAsCompleted_Success() {
- JobInstance instance = createInstance();
+ public void testMarkChunkAsCompleted_Success() throws InterruptedException {
+ boolean isGatedExecution = false;
+ myMaintenanceService.enableMaintenancePass(false);
+ JobInstance instance = createInstance(true, isGatedExecution);
String instanceId = mySvc.storeNewInstance(instance);
- String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, CHUNK_DATA);
+ String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, CHUNK_DATA, isGatedExecution);
assertNotNull(chunkId);
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(1);
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new).getStatus()));
-
- sleepUntilTimeChanges();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(chunkId).getStatus()));
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, findChunkByIdOrThrow(chunkId).getStatus()));
WorkChunk chunk = mySvc.onWorkChunkDequeue(chunkId).orElseThrow(IllegalArgumentException::new);
assertEquals(SEQUENCE_NUMBER, chunk.getSequence());
@@ -480,13 +699,13 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertNull(chunk.getEndTime());
assertNull(chunk.getRecordsProcessed());
assertNotNull(chunk.getData());
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new).getStatus()));
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.IN_PROGRESS, findChunkByIdOrThrow(chunkId).getStatus()));
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(chunkId, 50, 0));
runInTransaction(() -> {
- Batch2WorkChunkEntity entity = myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new);
+ Batch2WorkChunkEntity entity = findChunkByIdOrThrow(chunkId);
assertEquals(WorkChunkStatusEnum.COMPLETED, entity.getStatus());
assertEquals(50, entity.getRecordsProcessed());
assertNotNull(entity.getCreateTime());
@@ -496,63 +715,41 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertTrue(entity.getCreateTime().getTime() < entity.getStartTime().getTime());
assertTrue(entity.getStartTime().getTime() < entity.getEndTime().getTime());
});
- }
-
- @Test
- public void testGatedAdvancementByStatus() {
- // Setup
- JobInstance instance = createInstance();
- String instanceId = mySvc.storeNewInstance(instance);
- String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
- mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(chunkId, 0, 0));
-
- boolean canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
- assertTrue(canAdvance);
-
- //Storing a new chunk with QUEUED should prevent advancement.
- String newChunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
-
- canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
- assertFalse(canAdvance);
-
- //Toggle it to complete
- mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(newChunkId, 50, 0));
- canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
- assertTrue(canAdvance);
-
- //Create a new chunk and set it in progress.
- String newerChunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
- mySvc.onWorkChunkDequeue(newerChunkId);
- canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
- assertFalse(canAdvance);
-
- //Toggle IN_PROGRESS to complete
- mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(newerChunkId, 50, 0));
- canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
- assertTrue(canAdvance);
+ latch.awaitExpected();
+ verify(myBatchSender).sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
}
@Test
public void testMarkChunkAsCompleted_Error() {
- JobInstance instance = createInstance();
+ boolean isGatedExecution = false;
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(1);
+ myMaintenanceService.enableMaintenancePass(false);
+
+ JobInstance instance = createInstance(true, isGatedExecution);
String instanceId = mySvc.storeNewInstance(instance);
- String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
+ String chunkId = storeWorkChunk(JOB_DEFINITION_ID, TestJobDefinitionUtils.FIRST_STEP_ID, instanceId, SEQUENCE_NUMBER, null, isGatedExecution);
assertNotNull(chunkId);
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new).getStatus()));
-
- sleepUntilTimeChanges();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(chunkId).getStatus()));
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, findChunkByIdOrThrow(chunkId).getStatus()));
WorkChunk chunk = mySvc.onWorkChunkDequeue(chunkId).orElseThrow(IllegalArgumentException::new);
assertEquals(SEQUENCE_NUMBER, chunk.getSequence());
assertEquals(WorkChunkStatusEnum.IN_PROGRESS, chunk.getStatus());
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
WorkChunkErrorEvent request = new WorkChunkErrorEvent(chunkId).setErrorMsg("This is an error message");
mySvc.onWorkChunkError(request);
runInTransaction(() -> {
- Batch2WorkChunkEntity entity = myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new);
+ Batch2WorkChunkEntity entity = findChunkByIdOrThrow(chunkId);
assertEquals(WorkChunkStatusEnum.ERRORED, entity.getStatus());
assertEquals("This is an error message", entity.getErrorMessage());
assertNotNull(entity.getCreateTime());
@@ -568,7 +765,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
WorkChunkErrorEvent request2 = new WorkChunkErrorEvent(chunkId).setErrorMsg("This is an error message 2");
mySvc.onWorkChunkError(request2);
runInTransaction(() -> {
- Batch2WorkChunkEntity entity = myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new);
+ Batch2WorkChunkEntity entity = findChunkByIdOrThrow(chunkId);
assertEquals(WorkChunkStatusEnum.ERRORED, entity.getStatus());
assertEquals("This is an error message 2", entity.getErrorMessage());
assertNotNull(entity.getCreateTime());
@@ -582,28 +779,39 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
List chunks = ImmutableList.copyOf(mySvc.fetchAllWorkChunksIterator(instanceId, true));
assertEquals(1, chunks.size());
assertEquals(2, chunks.get(0).getErrorCount());
+
+ verify(myBatchSender).sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
}
@Test
- public void testMarkChunkAsCompleted_Fail() {
- JobInstance instance = createInstance();
+ public void testMarkChunkAsCompleted_Fail() throws InterruptedException {
+ boolean isGatedExecution = false;
+ myMaintenanceService.enableMaintenancePass(false);
+ JobInstance instance = createInstance(true, isGatedExecution);
String instanceId = mySvc.storeNewInstance(instance);
- String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
+ String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null, isGatedExecution);
assertNotNull(chunkId);
+ PointcutLatch latch = new PointcutLatch("senderlatch");
+ doAnswer(a -> {
+ latch.call(1);
+ return Void.class;
+ }).when(myBatchSender).sendWorkChannelMessage(any(JobWorkNotification.class));
+ latch.setExpectedCount(1);
- runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new).getStatus()));
-
- sleepUntilTimeChanges();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.READY, findChunkByIdOrThrow(chunkId).getStatus()));
+ myBatch2JobHelper.runMaintenancePass();
+ runInTransaction(() -> assertEquals(WorkChunkStatusEnum.QUEUED, findChunkByIdOrThrow(chunkId).getStatus()));
WorkChunk chunk = mySvc.onWorkChunkDequeue(chunkId).orElseThrow(IllegalArgumentException::new);
assertEquals(SEQUENCE_NUMBER, chunk.getSequence());
assertEquals(WorkChunkStatusEnum.IN_PROGRESS, chunk.getStatus());
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
mySvc.onWorkChunkFailed(chunkId, "This is an error message");
runInTransaction(() -> {
- Batch2WorkChunkEntity entity = myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new);
+ Batch2WorkChunkEntity entity = findChunkByIdOrThrow(chunkId);
assertEquals(WorkChunkStatusEnum.FAILED, entity.getStatus());
assertEquals("This is an error message", entity.getErrorMessage());
assertNotNull(entity.getCreateTime());
@@ -612,6 +820,10 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
assertTrue(entity.getCreateTime().getTime() < entity.getStartTime().getTime());
assertTrue(entity.getStartTime().getTime() < entity.getEndTime().getTime());
});
+ latch.awaitExpected();
+ verify(myBatchSender)
+ .sendWorkChannelMessage(any());
+ clearInvocations(myBatchSender);
}
@Test
@@ -626,7 +838,8 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
"stepId",
instanceId,
0,
- "{}"
+ "{}",
+ false
);
String id = mySvc.onWorkChunkCreate(chunk);
chunkIds.add(id);
@@ -674,15 +887,57 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
.orElseThrow(IllegalArgumentException::new));
}
+ private JobInstance createInstance() {
+ return createInstance(false, false);
+ }
@Nonnull
- private JobInstance createInstance() {
+ private JobInstance createInstance(boolean theCreateJobDefBool, boolean theCreateGatedJob) {
JobInstance instance = new JobInstance();
instance.setJobDefinitionId(JOB_DEFINITION_ID);
instance.setStatus(StatusEnum.QUEUED);
instance.setJobDefinitionVersion(JOB_DEF_VER);
instance.setParameters(CHUNK_DATA);
instance.setReport("TEST");
+
+ if (theCreateJobDefBool) {
+ JobDefinition> jobDef;
+
+ if (theCreateGatedJob) {
+ jobDef = TestJobDefinitionUtils.buildGatedJobDefinition(
+ JOB_DEFINITION_ID,
+ (step, sink) -> {
+ sink.accept(new FirstStepOutput());
+ return RunOutcome.SUCCESS;
+ },
+ (step, sink) -> {
+ return RunOutcome.SUCCESS;
+ },
+ theDetails -> {
+
+ }
+ );
+ instance.setCurrentGatedStepId(jobDef.getFirstStepId());
+ } else {
+ jobDef = TestJobDefinitionUtils.buildJobDefinition(
+ JOB_DEFINITION_ID,
+ (step, sink) -> {
+ sink.accept(new FirstStepOutput());
+ return RunOutcome.SUCCESS;
+ },
+ (step, sink) -> {
+ return RunOutcome.SUCCESS;
+ },
+ theDetails -> {
+
+ }
+ );
+ }
+ if (myJobDefinitionRegistry.getJobDefinition(jobDef.getJobDefinitionId(), jobDef.getJobDefinitionVersion()).isEmpty()) {
+ myJobDefinitionRegistry.addJobDefinition(jobDef);
+ }
+ }
+
return instance;
}
@@ -719,4 +974,12 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
Arguments.of(WorkChunkStatusEnum.COMPLETED, false)
);
}
+
+ private Batch2JobInstanceEntity findInstanceByIdOrThrow(String instanceId) {
+ return myJobInstanceRepository.findById(instanceId).orElseThrow(IllegalStateException::new);
+ }
+
+ private Batch2WorkChunkEntity findChunkByIdOrThrow(String secondChunkId) {
+ return myWorkChunkRepository.findById(secondChunkId).orElseThrow(IllegalArgumentException::new);
+ }
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportTest.java
index 1d0f89c493f..810c27bc900 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportTest.java
@@ -13,7 +13,6 @@ import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.batch2.JpaJobPersistenceImpl;
import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
-import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
import ca.uhn.fhir.jpa.model.util.JpaConstants;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
import ca.uhn.fhir.rest.api.Constants;
@@ -31,7 +30,6 @@ import ca.uhn.fhir.util.JsonUtil;
import com.google.common.collect.Sets;
import jakarta.annotation.Nonnull;
import org.apache.commons.io.LineIterator;
-import org.apache.commons.lang3.StringUtils;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
@@ -72,7 +70,6 @@ import org.mockito.Spy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.data.domain.PageRequest;
import java.io.IOException;
import java.io.StringReader;
@@ -85,10 +82,9 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Stream;
-import static ca.uhn.fhir.batch2.jobs.export.BulkExportAppCtx.CREATE_REPORT_STEP;
-import static ca.uhn.fhir.batch2.jobs.export.BulkExportAppCtx.WRITE_TO_BINARIES;
import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsInlineTest.createSearchParameterForInlineSecurity;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
import static org.awaitility.Awaitility.await;
@@ -477,7 +473,8 @@ public class BulkDataExportTest extends BaseResourceProviderR4Test {
verifyBulkExportResults(options, ids, new ArrayList<>());
assertFalse(valueSet.isEmpty());
- assertEquals(ids.size(), valueSet.size());
+ assertEquals(ids.size(), valueSet.size(),
+ "Expected " + String.join(", ", ids) + ". Actual : " + String.join(", ", valueSet));
for (String id : valueSet) {
// should start with our value from the key-value pairs
assertTrue(id.startsWith(value));
@@ -898,6 +895,7 @@ public class BulkDataExportTest extends BaseResourceProviderR4Test {
options.setResourceTypes(Sets.newHashSet("Patient", "Observation", "CarePlan", "MedicationAdministration", "ServiceRequest"));
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
options.setOutputFormat(Constants.CT_FHIR_NDJSON);
+
verifyBulkExportResults(options, List.of("Patient/P1", carePlanId, medAdminId, sevReqId, obsSubId, obsPerId), Collections.emptyList());
}
@@ -1096,7 +1094,6 @@ public class BulkDataExportTest extends BaseResourceProviderR4Test {
String resourceType = file.getKey();
List binaryIds = file.getValue();
for (var nextBinaryId : binaryIds) {
-
String nextBinaryIdPart = new IdType(nextBinaryId).getIdPart();
assertThat(nextBinaryIdPart, matchesPattern("[a-zA-Z0-9]{32}"));
@@ -1105,6 +1102,7 @@ public class BulkDataExportTest extends BaseResourceProviderR4Test {
String nextNdJsonFileContent = new String(binary.getContent(), Constants.CHARSET_UTF8);
try (var iter = new LineIterator(new StringReader(nextNdJsonFileContent))) {
+ AtomicBoolean gate = new AtomicBoolean(false);
iter.forEachRemaining(t -> {
if (isNotBlank(t)) {
IBaseResource next = myFhirContext.newJsonParser().parseResource(t);
@@ -1117,7 +1115,10 @@ public class BulkDataExportTest extends BaseResourceProviderR4Test {
}
}
}
+ gate.set(true);
});
+ await().atMost(400, TimeUnit.MILLISECONDS)
+ .until(gate::get);
} catch (IOException e) {
fail(e.toString());
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirSystemDaoR4Test.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirSystemDaoR4Test.java
index 93dc9dfc6e3..a628a79539b 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirSystemDaoR4Test.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirSystemDaoR4Test.java
@@ -93,7 +93,6 @@ import org.springframework.transaction.support.TransactionTemplate;
import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
@@ -582,13 +581,13 @@ public class FhirSystemDaoR4Test extends BaseJpaR4SystemTest {
p.addName().setFamily("family");
final IIdType id = myPatientDao.create(p, mySrd).getId().toUnqualified();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
ValueSet vs = new ValueSet();
vs.setUrl("http://foo");
myValueSetDao.create(vs, mySrd);
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
ResourceTable entity = new TransactionTemplate(myTxManager).execute(t -> myEntityManager.find(ResourceTable.class, id.getIdPartAsLong()));
assertEquals(Long.valueOf(1), entity.getIndexStatus());
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/delete/job/ReindexJobTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/delete/job/ReindexJobTest.java
index 77cbd0d21ce..d033d703d17 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/delete/job/ReindexJobTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/delete/job/ReindexJobTest.java
@@ -18,7 +18,10 @@ import ca.uhn.fhir.jpa.model.entity.ResourceTable;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import ca.uhn.fhir.jpa.test.PatientReindexTestHelper;
+import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
import ca.uhn.fhir.rest.server.exceptions.ResourceGoneException;
+import jakarta.annotation.PostConstruct;
+import jakarta.persistence.Query;
import org.hl7.fhir.instance.model.api.IIdType;
import org.hl7.fhir.r4.model.Observation;
import org.hl7.fhir.r4.model.Patient;
@@ -30,8 +33,6 @@ import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.springframework.beans.factory.annotation.Autowired;
-import jakarta.annotation.PostConstruct;
-import jakarta.persistence.Query;
import java.util.Date;
import java.util.List;
import java.util.stream.Stream;
@@ -263,7 +264,7 @@ public class ReindexJobTest extends BaseJpaR4Test {
.setOptimizeStorage(ReindexParameters.OptimizeStorageModeEnum.CURRENT_VERSION)
.setReindexSearchParameters(ReindexParameters.ReindexSearchParametersEnum.NONE)
);
- Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(startRequest);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), startRequest);
JobInstance outcome = myBatch2JobHelper.awaitJobCompletion(startResponse);
assertEquals(10, outcome.getCombinedRecordsProcessed());
@@ -358,7 +359,7 @@ public class ReindexJobTest extends BaseJpaR4Test {
myReindexTestHelper.createObservationWithAlleleExtension(Observation.ObservationStatus.FINAL);
}
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
myReindexTestHelper.createAlleleSearchParameter();
mySearchParamRegistry.forceRefresh();
@@ -390,7 +391,7 @@ public class ReindexJobTest extends BaseJpaR4Test {
JobInstanceStartRequest startRequest = new JobInstanceStartRequest();
startRequest.setJobDefinitionId(ReindexAppCtx.JOB_REINDEX);
startRequest.setParameters(new ReindexJobParameters());
- Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(startRequest);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), startRequest);
JobInstance myJob = myBatch2JobHelper.awaitJobCompletion(startResponse);
assertEquals(StatusEnum.COMPLETED, myJob.getStatus());
@@ -445,7 +446,7 @@ public class ReindexJobTest extends BaseJpaR4Test {
JobInstanceStartRequest startRequest = new JobInstanceStartRequest();
startRequest.setJobDefinitionId(ReindexAppCtx.JOB_REINDEX);
startRequest.setParameters(new ReindexJobParameters());
- Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(startRequest);
+ Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(new SystemRequestDetails(), startRequest);
JobInstance outcome = myBatch2JobHelper.awaitJobFailure(startResponse);
// Verify
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/AuthorizationInterceptorJpaR4Test.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/AuthorizationInterceptorJpaR4Test.java
index ac93db3d643..422db3d6fe7 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/AuthorizationInterceptorJpaR4Test.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/AuthorizationInterceptorJpaR4Test.java
@@ -82,6 +82,7 @@ import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -440,8 +441,9 @@ public class AuthorizationInterceptorJpaR4Test extends BaseResourceProviderR4Tes
}.setValidationSupport(myValidationSupport));
// Should be ok
- myClient.read().resource(Observation.class).withId("Observation/allowed").execute();
+ Observation result = myClient.read().resource(Observation.class).withId("Observation/allowed").execute();
+ assertNotNull(result);
}
@Test
@@ -463,8 +465,10 @@ public class AuthorizationInterceptorJpaR4Test extends BaseResourceProviderR4Tes
}.setValidationSupport(myValidationSupport));
// Should be ok
- myClient.read().resource(Patient.class).withId("Patient/P").execute();
- myClient.read().resource(Observation.class).withId("Observation/O").execute();
+ Patient pat = myClient.read().resource(Patient.class).withId("Patient/P").execute();
+ Observation obs = myClient.read().resource(Observation.class).withId("Observation/O").execute();
+ assertNotNull(pat);
+ assertNotNull(obs);
}
/**
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderCustomSearchParamR4Test.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderCustomSearchParamR4Test.java
index f6b9cd4b9df..b0239d24ee3 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderCustomSearchParamR4Test.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderCustomSearchParamR4Test.java
@@ -244,12 +244,15 @@ public class ResourceProviderCustomSearchParamR4Test extends BaseResourceProvide
mySearchParameterDao.create(fooSp, mySrd);
runInTransaction(() -> {
+ myBatch2JobHelper.forceRunMaintenancePass();
+
List allJobs = myBatch2JobHelper.findJobsByDefinition(ReindexAppCtx.JOB_REINDEX);
assertEquals(1, allJobs.size());
assertEquals(1, allJobs.get(0).getParameters(ReindexJobParameters.class).getPartitionedUrls().size());
assertEquals("Patient?", allJobs.get(0).getParameters(ReindexJobParameters.class).getPartitionedUrls().get(0).getUrl());
});
+ myBatch2JobHelper.awaitNoJobsRunning();
}
@Test
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4BundleTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4BundleTest.java
index 2b9f3891249..1136f192cc9 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4BundleTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4BundleTest.java
@@ -3,9 +3,11 @@ package ca.uhn.fhir.jpa.provider.r4;
import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
import ca.uhn.fhir.jpa.model.util.JpaConstants;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
+import ca.uhn.fhir.jpa.test.config.TestR4Config;
import ca.uhn.fhir.rest.server.exceptions.NotImplementedOperationException;
import com.google.common.base.Charsets;
import org.apache.commons.io.IOUtils;
+import org.hl7.fhir.instance.model.api.IBaseResource;
import org.hl7.fhir.instance.model.api.IIdType;
import org.hl7.fhir.r4.model.Bundle;
import org.hl7.fhir.r4.model.Bundle.BundleEntryComponent;
@@ -24,19 +26,32 @@ import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import static org.awaitility.Awaitility.await;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(ResourceProviderR4BundleTest.class);
+ private static final int DESIRED_MAX_THREADS = 5;
+
+ static {
+ if (TestR4Config.ourMaxThreads == null || TestR4Config.ourMaxThreads < DESIRED_MAX_THREADS) {
+ TestR4Config.ourMaxThreads = DESIRED_MAX_THREADS;
+ }
+ }
+
@BeforeEach
@Override
public void before() throws Exception {
@@ -52,6 +67,7 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
myStorageSettings.setBundleBatchPoolSize(JpaStorageSettings.DEFAULT_BUNDLE_BATCH_POOL_SIZE);
myStorageSettings.setBundleBatchMaxPoolSize(JpaStorageSettings.DEFAULT_BUNDLE_BATCH_MAX_POOL_SIZE);
}
+
/**
* See #401
*/
@@ -69,14 +85,13 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
Bundle retBundle = myClient.read().resource(Bundle.class).withId(id).execute();
- ourLog.debug(myFhirContext.newXmlParser().setPrettyPrint(true).encodeResourceToString(retBundle));
+ ourLog.debug(myFhirContext.newXmlParser().setPrettyPrint(true).encodeResourceToString(retBundle));
assertEquals("http://foo/", bundle.getEntry().get(0).getFullUrl());
}
@Test
public void testProcessMessage() {
-
Bundle bundle = new Bundle();
bundle.setType(BundleType.MESSAGE);
@@ -117,22 +132,41 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
}
-
@Test
- public void testHighConcurrencyWorks() throws IOException, InterruptedException {
+ public void testHighConcurrencyWorks() throws IOException {
List bundles = new ArrayList<>();
for (int i =0 ; i < 10; i ++) {
bundles.add(myFhirContext.newJsonParser().parseResource(Bundle.class, IOUtils.toString(getClass().getResourceAsStream("/r4/identical-tags-batch.json"), Charsets.UTF_8)));
}
- ExecutorService tpe = Executors.newFixedThreadPool(4);
- for (Bundle bundle :bundles) {
- tpe.execute(() -> myClient.transaction().withBundle(bundle).execute());
- }
- tpe.shutdown();
- tpe.awaitTermination(100, TimeUnit.SECONDS);
- }
+ int desiredMaxThreads = DESIRED_MAX_THREADS - 1;
+ int maxThreads = TestR4Config.getMaxThreads();
+ // we want strictly > because we want at least 1 extra thread hanging around for
+ // any spun off processes needed internally during the transaction
+ assertTrue(maxThreads > desiredMaxThreads, String.format("Wanted > %d threads, but we only have %d available", desiredMaxThreads, maxThreads));
+ ExecutorService tpe = Executors.newFixedThreadPool(desiredMaxThreads);
+ CompletionService completionService = new ExecutorCompletionService<>(tpe);
+ for (Bundle bundle : bundles) {
+ completionService.submit(() -> myClient.transaction().withBundle(bundle).execute());
+ }
+
+ int count = 0;
+ int expected = bundles.size();
+ while (count < expected) {
+ try {
+ completionService.take();
+ count++;
+ } catch (Exception ex) {
+ ourLog.error(ex.getMessage());
+ fail(ex.getMessage());
+ }
+ }
+
+ tpe.shutdown();
+ await().atMost(100, TimeUnit.SECONDS)
+ .until(tpe::isShutdown);
+ }
@Test
public void testBundleBatchWithSingleThread() {
@@ -144,8 +178,9 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
Bundle input = new Bundle();
input.setType(BundleType.BATCH);
- for (String id : ids)
- input.addEntry().getRequest().setMethod(HTTPVerb.GET).setUrl(id);
+ for (String id : ids) {
+ input.addEntry().getRequest().setMethod(HTTPVerb.GET).setUrl(id);
+ }
Bundle output = myClient.transaction().withBundle(input).execute();
@@ -158,9 +193,8 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
for (BundleEntryComponent bundleEntry : bundleEntries) {
assertEquals(ids.get(i++), bundleEntry.getResource().getIdElement().toUnqualifiedVersionless().getValueAsString());
}
-
-
}
+
@Test
public void testBundleBatchWithError() {
List ids = createPatients(5);
@@ -351,7 +385,8 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
bundle.getEntry().forEach(entry -> carePlans.add((CarePlan) entry.getResource()));
// Post CarePlans should not get: HAPI-2006: Unable to perform PUT, URL provided is invalid...
- myClient.transaction().withResources(carePlans).execute();
+ List result = myClient.transaction().withResources(carePlans).execute();
+ assertFalse(result.isEmpty());
}
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4CodeSystemTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4CodeSystemTest.java
index 06693388bee..c7dff314553 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4CodeSystemTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/provider/r4/ResourceProviderR4CodeSystemTest.java
@@ -6,6 +6,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceTable;
import ca.uhn.fhir.jpa.model.util.JpaConstants;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
import ca.uhn.fhir.jpa.term.TermTestUtil;
+import ca.uhn.fhir.jpa.term.api.ITermDeferredStorageSvc;
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
import org.apache.commons.io.IOUtils;
@@ -26,10 +27,13 @@ import org.hl7.fhir.r4.model.UriType;
import org.hl7.fhir.r4.model.codesystems.ConceptSubsumptionOutcome;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import static org.awaitility.Awaitility.await;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -37,12 +41,16 @@ import static org.junit.jupiter.api.Assertions.fail;
public class ResourceProviderR4CodeSystemTest extends BaseResourceProviderR4Test {
+
private static final String SYSTEM_PARENTCHILD = "http://parentchild";
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(ResourceProviderR4CodeSystemTest.class);
private static final String CS_ACME_URL = "http://acme.org";
private Long parentChildCsId;
private IIdType myCsId;
+ @Autowired
+ private ITermDeferredStorageSvc myITermDeferredStorageSvc;
+
@BeforeEach
@Transactional
public void before02() throws IOException {
@@ -63,6 +71,13 @@ public class ResourceProviderR4CodeSystemTest extends BaseResourceProviderR4Test
DaoMethodOutcome parentChildCsOutcome = myCodeSystemDao.create(parentChildCs);
parentChildCsId = ((ResourceTable) parentChildCsOutcome.getEntity()).getId();
+ // ensure all terms are loaded
+ await().atMost(5, TimeUnit.SECONDS)
+ .until(() -> {
+ myBatch2JobHelper.forceRunMaintenancePass();
+ myITermDeferredStorageSvc.saveDeferred();
+ return myITermDeferredStorageSvc.isStorageQueueEmpty(true);
+ });
}
@Test
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/reindex/ResourceReindexSvcImplTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/reindex/ResourceReindexSvcImplTest.java
index 2fcf5b19b1e..1cd96f9a46a 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/reindex/ResourceReindexSvcImplTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/reindex/ResourceReindexSvcImplTest.java
@@ -30,22 +30,22 @@ public class ResourceReindexSvcImplTest extends BaseJpaR4Test {
// Setup
createPatient(withActiveFalse());
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Date start = new Date();
Long id0 = createPatient(withActiveFalse()).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Long id1 = createPatient(withActiveFalse()).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Date beforeLastInRange = new Date();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Long id2 = createObservation(withObservationCode("http://foo", "bar")).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Date end = new Date();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
createPatient(withActiveFalse());
@@ -103,26 +103,26 @@ public class ResourceReindexSvcImplTest extends BaseJpaR4Test {
// Setup
final Long patientId0 = createPatient(withActiveFalse()).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
// Start of resources within range
Date start = new Date();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Long patientId1 = createPatient(withActiveFalse()).getIdPartAsLong();
createObservation(withObservationCode("http://foo", "bar"));
createObservation(withObservationCode("http://foo", "bar"));
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Date beforeLastInRange = new Date();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Long patientId2 = createPatient(withActiveFalse()).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
Date end = new Date();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
// End of resources within range
createObservation(withObservationCode("http://foo", "bar"));
final Long patientId3 = createPatient(withActiveFalse()).getIdPartAsLong();
- sleepUntilTimeChanges();
+ sleepUntilTimeChange();
// Execute
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/term/job/TermCodeSystemDeleteJobTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/term/job/TermCodeSystemDeleteJobTest.java
index 9b541282fa0..7f7897869d2 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/term/job/TermCodeSystemDeleteJobTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/term/job/TermCodeSystemDeleteJobTest.java
@@ -31,6 +31,7 @@ import ca.uhn.fhir.jpa.term.ZipCollectionBuilder;
import ca.uhn.fhir.jpa.term.models.TermCodeSystemDeleteJobParameters;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import ca.uhn.fhir.jpa.test.Batch2JobHelper;
+import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
import ca.uhn.fhir.rest.server.servlet.ServletRequestDetails;
import ca.uhn.fhir.util.JsonUtil;
@@ -127,7 +128,7 @@ public class TermCodeSystemDeleteJobTest extends BaseJpaR4Test {
JobInstanceStartRequest request = new JobInstanceStartRequest();
request.setJobDefinitionId(TERM_CODE_SYSTEM_DELETE_JOB_NAME);
request.setParameters(JsonUtil.serialize(parameters));
- Batch2JobStartResponse response = myJobCoordinator.startInstance(request);
+ Batch2JobStartResponse response = myJobCoordinator.startInstance(new SystemRequestDetails(), request);
myBatch2JobHelper.awaitJobCompletion(response);
@@ -147,7 +148,7 @@ public class TermCodeSystemDeleteJobTest extends BaseJpaR4Test {
request.setParameters(new TermCodeSystemDeleteJobParameters()); // no pid
InvalidRequestException exception = assertThrows(InvalidRequestException.class, () -> {
- myJobCoordinator.startInstance(request);
+ myJobCoordinator.startInstance(new SystemRequestDetails(), request);
});
assertTrue(exception.getMessage().contains("Invalid Term Code System PID 0"), exception.getMessage());
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/TestJobDefinitionUtils.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/TestJobDefinitionUtils.java
new file mode 100644
index 00000000000..230edc6881c
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/TestJobDefinitionUtils.java
@@ -0,0 +1,67 @@
+package ca.uhn.fhir.testjob;
+
+import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
+import ca.uhn.fhir.batch2.api.IJobStepWorker;
+import ca.uhn.fhir.batch2.api.VoidModel;
+import ca.uhn.fhir.batch2.model.JobDefinition;
+import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.testjob.models.FirstStepOutput;
+import ca.uhn.fhir.testjob.models.TestJobParameters;
+
+@SuppressWarnings({"unchecked", "rawtypes"})
+public class TestJobDefinitionUtils {
+
+ public static final int TEST_JOB_VERSION = 1;
+ public static final String FIRST_STEP_ID = "first-step";
+ public static final String LAST_STEP_ID = "last-step";
+
+ /**
+ * Creates a test job definition.
+ * This job will not be gated.
+ */
+ public static JobDefinition<? extends IModelJson> buildJobDefinition(
+ String theJobId,
+ IJobStepWorker theFirstStep,
+ IJobStepWorker theLastStep,
+ IJobCompletionHandler theCompletionHandler) {
+ return getJobBuilder(theJobId, theFirstStep, theLastStep, theCompletionHandler).build();
+ }
+
+ /**
+ * Creates a test job definition.
+ * This job will be gated.
+ */
+ public static JobDefinition<? extends IModelJson> buildGatedJobDefinition(
+ String theJobId,
+ IJobStepWorker theFirstStep,
+ IJobStepWorker theLastStep,
+ IJobCompletionHandler theCompletionHandler) {
+ return getJobBuilder(theJobId, theFirstStep, theLastStep, theCompletionHandler)
+ .gatedExecution().build();
+ }
+
+ private static JobDefinition.Builder getJobBuilder(
+ String theJobId,
+ IJobStepWorker theFirstStep,
+ IJobStepWorker theLastStep,
+ IJobCompletionHandler theCompletionHandler
+ ) {
+ return JobDefinition.newBuilder()
+ .setJobDefinitionId(theJobId)
+ .setJobDescription("test job")
+ .setJobDefinitionVersion(TEST_JOB_VERSION)
+ .setParametersType(TestJobParameters.class)
+ .addFirstStep(
+ FIRST_STEP_ID,
+ "Test first step",
+ FirstStepOutput.class,
+ theFirstStep
+ )
+ .addLastStep(
+ LAST_STEP_ID,
+ "Test last step",
+ theLastStep
+ )
+ .completionHandler(theCompletionHandler);
+ }
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/FirstStepOutput.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/FirstStepOutput.java
new file mode 100644
index 00000000000..34cefc682f8
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/FirstStepOutput.java
@@ -0,0 +1,9 @@
+package ca.uhn.fhir.testjob.models;
+
+import ca.uhn.fhir.model.api.IModelJson;
+
+/**
+ * Sample first step output for test job definitions created in {@link ca.uhn.fhir.testjob.TestJobDefinitionUtils}
+ */
+public class FirstStepOutput implements IModelJson {
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/ReductionStepOutput.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/ReductionStepOutput.java
new file mode 100644
index 00000000000..62e2a101188
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/ReductionStepOutput.java
@@ -0,0 +1,9 @@
+package ca.uhn.fhir.testjob.models;
+
+import ca.uhn.fhir.model.api.IModelJson;
+
+/**
+ * Sample output object for reduction steps for test job created in {@link ca.uhn.fhir.testjob.TestJobDefinitionUtils}
+ */
+public class ReductionStepOutput implements IModelJson {
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/TestJobParameters.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/TestJobParameters.java
new file mode 100644
index 00000000000..6fb3aa8650c
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/testjob/models/TestJobParameters.java
@@ -0,0 +1,9 @@
+package ca.uhn.fhir.testjob.models;
+
+import ca.uhn.fhir.model.api.IModelJson;
+
+/**
+ * Sample job parameters; these are used for jobs created in {@link ca.uhn.fhir.testjob.TestJobDefinitionUtils}
+ */
+public class TestJobParameters implements IModelJson {
+}
diff --git a/hapi-fhir-jpaserver-test-r4b/pom.xml b/hapi-fhir-jpaserver-test-r4b/pom.xml
index 8a2cd86b95d..0c26dd83b61 100644
--- a/hapi-fhir-jpaserver-test-r4b/pom.xml
+++ b/hapi-fhir-jpaserver-test-r4b/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-r5/pom.xml b/hapi-fhir-jpaserver-test-r5/pom.xml
index 0c7622cb1df..b3dc3455ff3 100644
--- a/hapi-fhir-jpaserver-test-r5/pom.xml
+++ b/hapi-fhir-jpaserver-test-r5/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-utilities/pom.xml b/hapi-fhir-jpaserver-test-utilities/pom.xml
index b756a6dbbd0..5aad3443ecb 100644
--- a/hapi-fhir-jpaserver-test-utilities/pom.xml
+++ b/hapi-fhir-jpaserver-test-utilities/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 7.3.0-SNAPSHOT
+ 7.3.1-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaR4Test.java b/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaR4Test.java
index 33d536600c1..ae49003702d 100644
--- a/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaR4Test.java
+++ b/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaR4Test.java
@@ -19,6 +19,7 @@
*/
package ca.uhn.fhir.jpa.test;
+import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
import ca.uhn.fhir.batch2.jobs.export.BulkDataExportProvider;
import ca.uhn.fhir.context.FhirContext;
import ca.uhn.fhir.context.support.IValidationSupport;
@@ -218,6 +219,7 @@ import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.fail;
@ExtendWith(SpringExtension.class)
@@ -247,7 +249,7 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
@Autowired
protected ISearchDao mySearchEntityDao;
@Autowired
- private IBatch2JobInstanceRepository myJobInstanceRepository;
+ protected IBatch2JobInstanceRepository myJobInstanceRepository;
@Autowired
private IBatch2WorkChunkRepository myWorkChunkRepository;
@@ -553,11 +555,18 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
@Autowired
protected TestDaoSearch myTestDaoSearch;
+ @Autowired
+ protected IJobMaintenanceService myJobMaintenanceService;
+
@RegisterExtension
private final PreventDanglingInterceptorsExtension myPreventDanglingInterceptorsExtension = new PreventDanglingInterceptorsExtension(()-> myInterceptorRegistry);
@AfterEach()
+ @Order(0)
public void afterCleanupDao() {
+ // make sure there are no running jobs
+ assertFalse(myBatch2JobHelper.hasRunningJobs());
+
myStorageSettings.setExpireSearchResults(new JpaStorageSettings().isExpireSearchResults());
myStorageSettings.setEnforceReferentialIntegrityOnDelete(new JpaStorageSettings().isEnforceReferentialIntegrityOnDelete());
myStorageSettings.setExpireSearchResultsAfterMillis(new JpaStorageSettings().getExpireSearchResultsAfterMillis());
@@ -572,6 +581,7 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
myPagingProvider.setMaximumPageSize(BasePagingProvider.DEFAULT_MAX_PAGE_SIZE);
myPartitionSettings.setPartitioningEnabled(false);
+ ourLog.info("1 - " + getClass().getSimpleName() + ".afterCleanupDao");
}
@Override
@@ -580,6 +590,8 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
public void afterResetInterceptors() {
super.afterResetInterceptors();
myInterceptorRegistry.unregisterInterceptor(myPerformanceTracingLoggingInterceptor);
+
+ ourLog.info("2 - " + getClass().getSimpleName() + ".afterResetInterceptors");
}
@AfterEach
@@ -590,6 +602,8 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
TermConceptMappingSvcImpl.clearOurLastResultsFromTranslationWithReverseCache();
TermDeferredStorageSvcImpl termDeferredStorageSvc = AopTestUtils.getTargetObject(myTerminologyDeferredStorageSvc);
termDeferredStorageSvc.clearDeferred();
+
+ ourLog.info("4 - " + getClass().getSimpleName() + ".afterClearTerminologyCaches");
}
@BeforeEach
@@ -613,6 +627,21 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
@AfterEach
public void afterPurgeDatabase() {
+ /*
+ * We have to stop all scheduled jobs or they will
+ * interfere with the database cleanup!
+ */
+ ourLog.info("Pausing Schedulers");
+ mySchedulerService.pause();
+
+ myTerminologyDeferredStorageSvc.logQueueForUnitTest();
+ if (!myTermDeferredStorageSvc.isStorageQueueEmpty(true)) {
+ ourLog.warn("There is deferred terminology storage stuff still in the queue. Please verify your tests clean up ok.");
+ if (myTermDeferredStorageSvc instanceof TermDeferredStorageSvcImpl t) {
+ t.clearDeferred();
+ }
+ }
+
boolean registeredStorageInterceptor = false;
if (myMdmStorageInterceptor != null && !myInterceptorService.getAllRegisteredInterceptors().contains(myMdmStorageInterceptor)) {
myInterceptorService.registerInterceptor(myMdmStorageInterceptor);
@@ -635,6 +664,11 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
myInterceptorService.unregisterInterceptor(myMdmStorageInterceptor);
}
}
+
+ // restart the jobs
+ ourLog.info("Restarting the schedulers");
+ mySchedulerService.unpause();
+ ourLog.info("5 - " + getClass().getSimpleName() + ".afterPurgeDatabases");
}
@BeforeEach
@@ -819,6 +853,7 @@ public abstract class BaseJpaR4Test extends BaseJpaTest implements ITestDataBuil
@AfterEach
public void afterEachClearCaches() {
myJpaValidationSupportChainR4.invalidateCaches();
+ ourLog.info("3 - " + getClass().getSimpleName() + ".afterEachClearCaches");
}
private static void flattenExpansionHierarchy(List theFlattenedHierarchy, List theCodes, String thePrefix) {
diff --git a/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaTest.java b/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaTest.java
index 8a6890d96ae..b8af987a9f2 100644
--- a/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaTest.java
+++ b/hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/BaseJpaTest.java
@@ -69,6 +69,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamToken;
import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamUri;
import ca.uhn.fhir.jpa.model.entity.ResourceLink;
import ca.uhn.fhir.jpa.model.entity.ResourceTable;
+import ca.uhn.fhir.jpa.model.sched.ISchedulerService;
import ca.uhn.fhir.jpa.model.util.JpaConstants;
import ca.uhn.fhir.jpa.partition.IPartitionLookupSvc;
import ca.uhn.fhir.jpa.search.DatabaseBackedPagingProvider;
@@ -77,6 +78,7 @@ import ca.uhn.fhir.jpa.search.cache.ISearchResultCacheSvc;
import ca.uhn.fhir.jpa.search.reindex.IResourceReindexingSvc;
import ca.uhn.fhir.jpa.subscription.match.registry.SubscriptionLoader;
import ca.uhn.fhir.jpa.subscription.match.registry.SubscriptionRegistry;
+import ca.uhn.fhir.jpa.term.api.ITermDeferredStorageSvc;
import ca.uhn.fhir.jpa.util.CircularQueueCaptureQueriesListener;
import ca.uhn.fhir.jpa.util.MemoryCacheService;
import ca.uhn.fhir.rest.api.server.IBundleProvider;
@@ -243,6 +245,8 @@ public abstract class BaseJpaTest extends BaseTest {
protected ITermConceptPropertyDao myTermConceptPropertyDao;
@Autowired
private MemoryCacheService myMemoryCacheService;
+ @Autowired
+ protected ISchedulerService mySchedulerService;
@Qualifier(JpaConfig.JPA_VALIDATION_SUPPORT)
@Autowired
private IValidationSupport myJpaPersistedValidationSupport;
@@ -256,6 +260,8 @@ public abstract class BaseJpaTest extends BaseTest {
private IResourceHistoryTableDao myResourceHistoryTableDao;
@Autowired
private DaoRegistry myDaoRegistry;
+ @Autowired
+ protected ITermDeferredStorageSvc myTermDeferredStorageSvc;
private final List