public static List<Arguments> provideStatuses() {
+ return List.of(
+ Arguments.of(StatusEnum.QUEUED, true),
+ Arguments.of(StatusEnum.IN_PROGRESS, true),
+ Arguments.of(StatusEnum.ERRORED, true),
+ Arguments.of(StatusEnum.FAILED, false),
+ Arguments.of(StatusEnum.COMPLETED, false)
+ );
+ }
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataErrorAbuseTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataErrorAbuseTest.java
new file mode 100644
index 00000000000..1a5808bfe0e
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataErrorAbuseTest.java
@@ -0,0 +1,225 @@
+package ca.uhn.fhir.jpa.bulk;
+
+import ca.uhn.fhir.jpa.api.config.DaoConfig;
+import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
+import ca.uhn.fhir.jpa.api.svc.IBatch2JobRunner;
+import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
+import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
+import ca.uhn.fhir.jpa.util.BulkExportUtils;
+import ca.uhn.fhir.rest.api.Constants;
+import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
+import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
+import ca.uhn.fhir.util.JsonUtil;
+import com.google.common.collect.Sets;
+import org.hl7.fhir.r4.model.Binary;
+import org.hl7.fhir.r4.model.Enumerations;
+import org.hl7.fhir.r4.model.Group;
+import org.hl7.fhir.r4.model.IdType;
+import org.hl7.fhir.r4.model.Patient;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.io.BufferedReader;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.emptyOrNullString;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.not;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+/**
+ * A test to poke at our job framework and induce errors.
+ */
+public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
+ private static final Logger ourLog = LoggerFactory.getLogger(BulkDataErrorAbuseTest.class);
+
+ @Autowired
+ private DaoConfig myDaoConfig;
+
+ @Autowired
+ private IBatch2JobRunner myJobRunner;
+
+ @AfterEach
+ void afterEach() {
+ myDaoConfig.setIndexMissingFields(DaoConfig.IndexEnabledEnum.DISABLED);
+ }
+
+ @Test
+ public void testGroupBulkExportNotInGroup_DoesNotShowUp() throws InterruptedException, ExecutionException {
+ duAbuseTest(100);
+ }
+
+ /**
+ * This test is disabled because it never actually exits. Run it if you want to ensure
+ * that changes to the Bulk Export Batch2 task haven't affected our ability to successfully
+ * run endless parallel jobs. If you run it for a few minutes and it never stops on its own,
+ * you are good.
+ *
+ * The enabled test {@link #testGroupBulkExportNotInGroup_DoesNotShowUp()} above does run
+ * with the build and executes 100 jobs.
+ */
+ @Test
+ @Disabled
+ public void testNonStopAbuseBatch2BulkExportStressTest() throws InterruptedException, ExecutionException {
+ duAbuseTest(Integer.MAX_VALUE);
+ }
+
+ private void duAbuseTest(int taskExecutions) throws InterruptedException, ExecutionException {
+ // Create some resources
+ Patient patient = new Patient();
+ patient.setId("PING1");
+ patient.setGender(Enumerations.AdministrativeGender.FEMALE);
+ patient.setActive(true);
+ myClient.update().resource(patient).execute();
+
+ patient = new Patient();
+ patient.setId("PING2");
+ patient.setGender(Enumerations.AdministrativeGender.MALE);
+ patient.setActive(true);
+ myClient.update().resource(patient).execute();
+
+ patient = new Patient();
+ patient.setId("PNING3");
+ patient.setGender(Enumerations.AdministrativeGender.MALE);
+ patient.setActive(true);
+ myClient.update().resource(patient).execute();
+
+ Group group = new Group();
+ group.setId("Group/G2");
+ group.setActive(true);
+ group.addMember().getEntity().setReference("Patient/PING1");
+ group.addMember().getEntity().setReference("Patient/PING2");
+ myClient.update().resource(group).execute();
+
+ // set the export options
+ BulkDataExportOptions options = new BulkDataExportOptions();
+ options.setResourceTypes(Sets.newHashSet("Patient"));
+ options.setGroupId(new IdType("Group", "G2"));
+ options.setFilters(new HashSet<>());
+ options.setExportStyle(BulkDataExportOptions.ExportStyle.GROUP);
+ options.setOutputFormat(Constants.CT_FHIR_NDJSON);
+
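+ // Use a fixed pool of 10 threads so up to 10 bulk export jobs are started and verified in parallel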
+ BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
+ ExecutorService executorService = new ThreadPoolExecutor(10, 10,
+ 0L, TimeUnit.MILLISECONDS,
+ workQueue);
+
+ ourLog.info("Starting task creation");
+
+ List<Future<Boolean>> futures = new ArrayList<>();
+ for (int i = 0; i < taskExecutions; i++) {
+ futures.add(executorService.submit(() -> {
+ String instanceId = null;
+ try {
+ instanceId = startJob(options);
+
+ // Run a scheduled pass to build the export
+ myBatch2JobHelper.awaitJobCompletion(instanceId, 60);
+
+ verifyBulkExportResults(instanceId, List.of("Patient/PING1", "Patient/PING2"), Collections.singletonList("Patient/PNING3"));
+
+ return true;
+ } catch (Throwable theError) {
+ ourLog.error("Caught an error during processing instance {}", instanceId, theError);
+ throw new InternalErrorException("Caught an error during processing instance " + instanceId, theError);
+ }
+ }));
+
+ // Don't let the list of futures grow so big we run out of memory
+ if (futures.size() > 200) {
+ while (futures.size() > 100) {
+ // This should always return true, but it'll throw an exception if we failed
+ assertTrue(futures.remove(0).get());
+ }
+ }
+ }
+
+ ourLog.info("Done creating tasks, waiting for task completion");
+
+ for (var next : futures) {
+ // This should always return true, but it'll throw an exception if we failed
+ assertTrue(next.get());
+ }
+
+ ourLog.info("Finished task execution");
+ }
+
+
+ private void verifyBulkExportResults(String theInstanceId, List<String> theContainedList, List<String> theExcludedList) {
+ // Iterate over the files
+ String report = myJobRunner.getJobInfo(theInstanceId).getReport();
+ ourLog.debug("Export job {} report: {}", theInstanceId, report);
+ if (!theContainedList.isEmpty()) {
+ assertThat("report for instance " + theInstanceId + " is empty", report, not(emptyOrNullString()));
+ }
+ BulkExportJobResults results = JsonUtil.deserialize(report, BulkExportJobResults.class);
+
+ Set<String> foundIds = new HashSet<>();
+ for (Map.Entry<String, List<String>> file : results.getResourceTypeToBinaryIds().entrySet()) {
+ String resourceType = file.getKey();
+ List<String> binaryIds = file.getValue();
+ for (var nextBinaryId : binaryIds) {
+
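+ // Each Binary is one NDJSON export file; verify its content type, then parse every line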
+ Binary binary = myBinaryDao.read(new IdType(nextBinaryId), mySrd);
+ assertEquals(Constants.CT_FHIR_NDJSON, binary.getContentType());
+
+ String nextNdJsonFileContent = new String(binary.getContent(), Constants.CHARSET_UTF8);
+ ourLog.trace("Export job {} file {} contents: {}", theInstanceId, nextBinaryId, nextNdJsonFileContent);
+
+ List<String> lines = new BufferedReader(new StringReader(nextNdJsonFileContent))
+ .lines().toList();
+ ourLog.debug("Export job {} file {} line-count: {}", theInstanceId, nextBinaryId, lines.size());
+
+ lines.stream()
+ .map(line -> myFhirContext.newJsonParser().parseResource(line))
+ .map(r -> r.getIdElement().toUnqualifiedVersionless())
+ .forEach(nextId -> {
+ if (!resourceType.equals(nextId.getResourceType())) {
+ fail("Found resource of type " + nextId.getResourceType() + " in file for type " + resourceType);
+ } else {
+ if (!foundIds.add(nextId.getValue())) {
+ fail("Found duplicate ID: " + nextId.getValue());
+ }
+ }
+ });
+ }
+ }
+
+ ourLog.debug("Export job {} exported resources {}", theInstanceId, foundIds);
+
+ for (String containedString : theContainedList) {
+ assertThat("export has expected ids", foundIds, hasItem(containedString));
+ }
+ for (String excludedString : theExcludedList) {
+ assertThat("export doesn't have excluded ids", foundIds, not(hasItem(excludedString)));
+ }
+ }
+
+ private String startJob(BulkDataExportOptions theOptions) {
+ Batch2JobStartResponse startResponse = myJobRunner.startNewJob(BulkExportUtils.createBulkExportJobParametersFromExportOptions(theOptions));
+ assertNotNull(startResponse);
+ return startResponse.getJobId();
+ }
+
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportIT.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportIT.java
index e2bdeddc74e..b015514569b 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportIT.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkDataExportIT.java
@@ -53,6 +53,7 @@ import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
+import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsInlineTest.createSearchParameterForInlineSecurity;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
@@ -73,7 +74,8 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
@AfterEach
void afterEach() {
- myDaoConfig.setIndexMissingFields(DaoConfig.IndexEnabledEnum.DISABLED);
+ myDaoConfig.setIndexMissingFields(new DaoConfig().getIndexMissingFields());
+ myDaoConfig.setTagStorageMode(new DaoConfig().getTagStorageMode());
}
@Test
@@ -108,6 +110,45 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
verifyBulkExportResults(options, Collections.singletonList("Patient/PF"), Collections.singletonList("Patient/PM"));
}
+
+ @Test
+ public void testGroupBulkExportWithTypeFilter_OnTags_InlineTagMode() {
+ myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
+ mySearchParameterDao.update(createSearchParameterForInlineSecurity(), mySrd);
+ mySearchParamRegistry.forceRefresh();
+
+ // Create some resources
+ Patient patient = new Patient();
+ patient.setId("PF");
+ patient.getMeta().addSecurity("http://security", "val0", null);
+ patient.setActive(true);
+ myClient.update().resource(patient).execute();
+
+ patient = new Patient();
+ patient.setId("PM");
+ patient.getMeta().addSecurity("http://security", "val1", null);
+ patient.setActive(true);
+ myClient.update().resource(patient).execute();
+
+ Group group = new Group();
+ group.setId("Group/G");
+ group.setActive(true);
+ group.addMember().getEntity().setReference("Patient/PF");
+ group.addMember().getEntity().setReference("Patient/PM");
+ myClient.update().resource(group).execute();
+
+ // set the export options
+ BulkDataExportOptions options = new BulkDataExportOptions();
+ options.setResourceTypes(Sets.newHashSet("Patient"));
+ options.setGroupId(new IdType("Group", "G"));
+ options.setFilters(Sets.newHashSet("Patient?_security=http://security|val1"));
+ options.setExportStyle(BulkDataExportOptions.ExportStyle.GROUP);
+ options.setOutputFormat(Constants.CT_FHIR_NDJSON);
+ verifyBulkExportResults(options, Collections.singletonList("Patient/PM"), Collections.singletonList("Patient/PF"));
+ }
+
+
+
@Test
@Disabled("disabled to make the rel_6_4 release pipeline pass")
public void testGroupBulkExportNotInGroup_DoesNotShowUp() {
@@ -714,10 +755,10 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
}
for (String containedString : theContainedList) {
- assertThat(foundIds, hasItem(containedString));
+ assertThat("Didn't find expected ID " + containedString + " in IDS: " + foundIds, foundIds, hasItem(containedString));
}
for (String excludedString : theExcludedList) {
- assertThat(foundIds, not(hasItem(excludedString)));
+ assertThat("Didn't want unexpected ID " + excludedString + " in IDS: " + foundIds, foundIds, not(hasItem(excludedString)));
}
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkExportUseCaseIT.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkExportUseCaseIT.java
index 21e6782b984..e248128dbbb 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkExportUseCaseIT.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/BulkExportUseCaseIT.java
@@ -1,21 +1,30 @@
package ca.uhn.fhir.jpa.bulk;
+import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
import ca.uhn.fhir.jpa.api.svc.IBatch2JobRunner;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.bulk.export.model.BulkExportJobStatusEnum;
import ca.uhn.fhir.jpa.bulk.export.model.BulkExportResponseJson;
+import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
+import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
+import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
+import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
+import ca.uhn.fhir.jpa.test.Batch2JobHelper;
import ca.uhn.fhir.jpa.util.BulkExportUtils;
import ca.uhn.fhir.parser.IParser;
import ca.uhn.fhir.rest.api.Constants;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
+import ca.uhn.fhir.util.BundleBuilder;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.fhir.util.SearchParameterUtil;
import com.google.common.collect.Sets;
@@ -33,6 +42,7 @@ import org.hl7.fhir.r4.model.Encounter;
import org.hl7.fhir.r4.model.Enumerations;
import org.hl7.fhir.r4.model.Group;
import org.hl7.fhir.r4.model.IdType;
+import org.hl7.fhir.r4.model.InstantType;
import org.hl7.fhir.r4.model.Observation;
import org.hl7.fhir.r4.model.Patient;
import org.hl7.fhir.r4.model.Reference;
@@ -51,6 +61,7 @@ import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -84,6 +95,8 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
@Autowired
private IJobPersistence myJobPersistence;
+ @Autowired
+ private IJobMaintenanceService myJobMaintenanceService;
@Nested
@@ -366,6 +379,8 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
options.setExportStyle(BulkDataExportOptions.ExportStyle.SYSTEM);
options.setOutputFormat(Constants.CT_FHIR_NDJSON);
+ myCaptureQueriesListener.clear();
+
Batch2JobStartResponse startResponse = myJobRunner.startNewJob(BulkExportUtils.createBulkExportJobParametersFromExportOptions(options));
assertNotNull(startResponse);
@@ -375,6 +390,23 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
// Run a scheduled pass to build the export
myBatch2JobHelper.awaitJobCompletion(startResponse.getJobId());
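+ // Capture and log any UPDATE statements that touched BT2_JOB_INSTANCE while the export job ran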
+ String queries = myCaptureQueriesListener
+ .getUpdateQueries()
+ .stream()
+ .filter(t->t.getSql(false, false).toUpperCase().contains(" BT2_JOB_INSTANCE "))
+ .map(t->new InstantType(new Date(t.getQueryTimestamp())) + " - " + t.getSql(true, false))
+ .collect(Collectors.joining("\n * "));
+ ourLog.info("Update queries:\n * " + queries);
+
+ runInTransaction(()->{
+ String entities = myJobInstanceRepository
+ .findAll()
+ .stream()
+ .map(t->t.toString())
+ .collect(Collectors.joining("\n * "));
+ ourLog.info("Entities:\n * " + entities);
+ });
+
final Optional<JobInstance> optJobInstance = myJobPersistence.fetchInstance(jobId);
assertNotNull(optJobInstance);
@@ -501,25 +533,28 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
@Disabled("temporary for rel_6_4")
public void testVeryLargeGroup() {
+ BundleBuilder bb = new BundleBuilder(myFhirContext);
+
Group group = new Group();
group.setId("Group/G");
group.setActive(true);
+ bb.addTransactionUpdateEntry(group);
for (int i = 0; i < 600; i++) {
Patient patient = new Patient();
patient.setId("PING-" + i);
patient.setGender(Enumerations.AdministrativeGender.FEMALE);
patient.setActive(true);
- myClient.update().resource(patient).execute();
+ bb.addTransactionUpdateEntry(patient);
group.addMember().getEntity().setReference("Patient/PING-" + i);
Observation obs = new Observation();
obs.setId("obs-" + i);
obs.setSubject(new Reference("Patient/PING-" + i));
- myClient.update().resource(obs).execute();
+ bb.addTransactionUpdateEntry(obs);
}
- myClient.update().resource(group).execute();
+ myClient.transaction().withBundle(bb.getBundle()).execute();
HashSet<String> resourceTypes = Sets.newHashSet("Group", "Patient", "Observation");
BulkExportJobResults bulkExportJobResults = startGroupBulkExportJobAndAwaitCompletion(resourceTypes, new HashSet<>(), "G");
@@ -567,7 +602,6 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
@Test
@Disabled("failing intermittently for latest rel_6_4")
public void testDifferentTypesDoNotUseCachedResults() {
-
Patient patient = new Patient();
patient.setId("PING1");
patient.setGender(Enumerations.AdministrativeGender.FEMALE);
@@ -601,6 +635,13 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
Map<String, List<IBaseResource>> secondMap = convertJobResultsToResources(altBulkExportResults);
assertThat(secondMap.get("Patient"), hasSize(1));
assertThat(secondMap.get("Coverage"), hasSize(1));
+
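+ // Dump job instance and work chunk state to the log for debugging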
+ runInTransaction(()->{
+ List<Batch2JobInstanceEntity> instances = myJobInstanceRepository.findAll();
+ ourLog.info("Job instance states:\n * {}", instances.stream().map(Object::toString).collect(Collectors.joining("\n * ")));
+ List<Batch2WorkChunkEntity> workChunks = myWorkChunkRepository.findAll();
+ ourLog.info("Work chunks instance states:\n * {}", workChunks.stream().map(Object::toString).collect(Collectors.joining("\n * ")));
+ });
}
@@ -1128,7 +1169,7 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
myBatch2JobHelper.awaitJobCompletion(startResponse.getJobId(), 60);
- await().atMost(300, TimeUnit.SECONDS).until(() -> myJobRunner.getJobInfo(startResponse.getJobId()).getReport() != null);
+ assertNotNull(myJobRunner.getJobInfo(startResponse.getJobId()).getReport());
String report = myJobRunner.getJobInfo(startResponse.getJobId()).getReport();
BulkExportJobResults results = JsonUtil.deserialize(report, BulkExportJobResults.class);
@@ -1177,4 +1218,10 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
private BulkExportJobResults startPatientBulkExportJobAndAwaitResults(HashSet<String> theTypes, HashSet<String> theFilters, String thePatientId) {
return startBulkExportJobAndAwaitCompletion(BulkDataExportOptions.ExportStyle.PATIENT, theTypes, theFilters, thePatientId);
}
+
+ @Autowired
+ private IBatch2JobInstanceRepository myJobInstanceRepository;
+ @Autowired
+ private IBatch2WorkChunkRepository myWorkChunkRepository;
+
}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/export/ExpandResourcesStepJpaTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/export/ExpandResourcesStepJpaTest.java
new file mode 100644
index 00000000000..f99a8769215
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/bulk/export/ExpandResourcesStepJpaTest.java
@@ -0,0 +1,111 @@
+package ca.uhn.fhir.jpa.bulk.export;
+
+import ca.uhn.fhir.batch2.api.IJobDataSink;
+import ca.uhn.fhir.batch2.api.StepExecutionDetails;
+import ca.uhn.fhir.batch2.jobs.export.ExpandResourcesStep;
+import ca.uhn.fhir.batch2.jobs.export.models.BulkExportJobParameters;
+import ca.uhn.fhir.batch2.jobs.export.models.ExpandedResourcesList;
+import ca.uhn.fhir.batch2.jobs.export.models.ResourceIdList;
+import ca.uhn.fhir.batch2.jobs.models.BatchResourceId;
+import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.WorkChunkData;
+import ca.uhn.fhir.jpa.api.config.DaoConfig;
+import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
+import ca.uhn.fhir.rest.server.interceptor.ResponseSizeCapturingInterceptor;
+import org.hl7.fhir.r4.model.Patient;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvFileSource;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsString;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class ExpandResourcesStepJpaTest extends BaseJpaR4Test {
+
+ @Autowired
+ private ExpandResourcesStep myExpandResourcesStep;
+
+ @Mock
+ private IJobDataSink<ExpandedResourcesList> mySink;
+ @Captor
+ private ArgumentCaptor<ExpandedResourcesList> myWorkChunkCaptor;
+
+ @Override
+ public void afterCleanupDao() {
+ super.afterCleanupDao();
+
+ myDaoConfig.setTagStorageMode(new DaoConfig().getTagStorageMode());
+ }
+
+ /**
+ * Make sure we load inline tags efficiently when generating a bulk export
+ */
+ @ParameterizedTest
+ @CsvSource({"INLINE,2", "NON_VERSIONED,3", "VERSIONED,3"})
+ public void testBulkExportExpandResourcesStep(DaoConfig.TagStorageModeEnum theTagStorageMode, int theExpectedSelectQueries) {
+ // Setup
+
+ myDaoConfig.setTagStorageMode(theTagStorageMode);
+
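+ // Create 10 patients, each carrying one shared tag and one patient-specific tag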
+ int count = 10;
+ List<Long> ids = IntStream.range(0, count)
+ .boxed()
+ .map(t -> {
+ Patient p = new Patient();
+ p.getMeta().addTag().setSystem("http://static").setCode("tag");
+ p.getMeta().addTag().setSystem("http://dynamic").setCode("tag" + t);
+ return myPatientDao.create(p, mySrd).getId().getIdPartAsLong();
+ }).toList();
+ assertEquals(count, ids.size());
+
+ ResourceIdList resourceList = new ResourceIdList();
+ resourceList.setResourceType("Patient");
+ resourceList.setIds(ids.stream().map(t->new BatchResourceId().setResourceType("Patient").setId(Long.toString(t))).toList());
+
+ BulkExportJobParameters params = new BulkExportJobParameters();
+ JobInstance jobInstance = new JobInstance();
+ String chunkId = "ABC";
+
+ StepExecutionDetails<BulkExportJobParameters, ResourceIdList> details = new StepExecutionDetails<>(params, resourceList, jobInstance, chunkId);
+
+ // Test
+
+ myCaptureQueriesListener.clear();
+ myExpandResourcesStep.run(details, mySink);
+
+ // Verify
+
+ verify(mySink, times(1)).accept(myWorkChunkCaptor.capture());
+ ExpandedResourcesList expandedResourceList = myWorkChunkCaptor.getValue();
+ assertEquals(10, expandedResourceList.getStringifiedResources().size());
+ assertThat(expandedResourceList.getStringifiedResources().get(0), containsString("{\"system\":\"http://static\",\"code\":\"tag\"}"));
+ assertThat(expandedResourceList.getStringifiedResources().get(0), containsString("{\"system\":\"http://dynamic\",\"code\":\"tag0\"}"));
+ assertThat(expandedResourceList.getStringifiedResources().get(1), containsString("{\"system\":\"http://static\",\"code\":\"tag\"}"));
+ assertThat(expandedResourceList.getStringifiedResources().get(1), containsString("{\"system\":\"http://dynamic\",\"code\":\"tag1\"}"));
+
+ // Verify query counts
+ assertEquals(theExpectedSelectQueries, myCaptureQueriesListener.countSelectQueries());
+ assertEquals(0, myCaptureQueriesListener.countInsertQueries());
+ assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
+ assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
+ assertEquals(2, myCaptureQueriesListener.countCommits());
+ assertEquals(0, myCaptureQueriesListener.countRollbacks());
+
+ }
+
+
+
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4SearchSqlTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4SearchSqlTest.java
index 25a9c07a218..1924ada91fd 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4SearchSqlTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4SearchSqlTest.java
@@ -1,5 +1,9 @@
package ca.uhn.fhir.jpa.dao.r4;
+import ca.uhn.fhir.batch2.api.StepExecutionDetails;
+import ca.uhn.fhir.batch2.jobs.export.ExpandResourcesStep;
+import ca.uhn.fhir.batch2.jobs.export.models.BulkExportJobParameters;
+import ca.uhn.fhir.batch2.jobs.export.models.ResourceIdList;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
@@ -17,8 +21,13 @@ import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import java.util.ArrayList;
+import java.util.List;
import java.util.UUID;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
@@ -109,7 +118,7 @@ public class FhirResourceDaoR4SearchSqlTest extends BaseJpaR4Test {
boolean reindexParamCache = myDaoConfig.isMarkResourcesForReindexingUponSearchParameterChange();
myDaoConfig.setMarkResourcesForReindexingUponSearchParameterChange(false);
- SearchParameter searchParameter = FhirResourceDaoR4TagsTest.createSearchParamForInlineResourceProfile();
+ SearchParameter searchParameter = FhirResourceDaoR4TagsInlineTest.createSearchParameterForInlineProfile();
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
mySearchParameterDao.update(searchParameter, mySrd);
mySearchParamRegistry.forceRefresh();
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsInlineTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsInlineTest.java
new file mode 100644
index 00000000000..6d83ae28ede
--- /dev/null
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsInlineTest.java
@@ -0,0 +1,236 @@
+package ca.uhn.fhir.jpa.dao.r4;
+
+import ca.uhn.fhir.context.FhirContext;
+import ca.uhn.fhir.jpa.api.config.DaoConfig;
+import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
+import ca.uhn.fhir.rest.gclient.TokenClientParam;
+import org.hl7.fhir.r4.model.Bundle;
+import org.hl7.fhir.r4.model.Enumerations;
+import org.hl7.fhir.r4.model.IdType;
+import org.hl7.fhir.r4.model.Patient;
+import org.hl7.fhir.r4.model.SearchParameter;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+
+import javax.annotation.Nonnull;
+
+import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toProfiles;
+import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toSecurityLabels;
+import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toTags;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+@SuppressWarnings({"Duplicates"})
+public class FhirResourceDaoR4TagsInlineTest extends BaseResourceProviderR4Test {
+
+ private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(FhirResourceDaoR4TagsInlineTest.class);
+
+ @Override
+ @AfterEach
+ public final void after() throws Exception {
+ super.after();
+ myDaoConfig.setTagStorageMode(DaoConfig.DEFAULT_TAG_STORAGE_MODE);
+ }
+
+
+ @Test
+ public void testInlineTags_StoreAndRetrieve() {
+ myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
+
+ // Store a first version
+ Patient patient = new Patient();
+ patient.setId("Patient/A");
+ patient.getMeta().addProfile("http://profile1");
+ patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
+ patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
+ patient.setActive(true);
+ myPatientDao.update(patient, mySrd);
+
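+ // With INLINE tag storage the tags live in the resource body, so the dedicated tag tables stay empty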
+ runInTransaction(() -> {
+ assertEquals(0, myResourceTagDao.count());
+ assertEquals(0, myResourceHistoryTagDao.count());
+ assertEquals(0, myTagDefinitionDao.count());
+ });
+
+ // Read it back
+ patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
+ assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
+ assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
+ assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
+
+ // Store a second version
+ patient = new Patient();
+ patient.setId("Patient/A");
+ patient.getMeta().addProfile("http://profile2");
+ patient.getMeta().addTag("http://tag2", "vtag2", "dtag2");
+ patient.getMeta().addSecurity("http://sec2", "vsec2", "dsec2");
+ patient.setActive(true);
+ myPatientDao.update(patient, mySrd);
+
+ runInTransaction(() -> {
+ assertEquals(0, myResourceTagDao.count());
+ assertEquals(0, myResourceHistoryTagDao.count());
+ assertEquals(0, myTagDefinitionDao.count());
+ });
+
+ // First version should have only the initial tags
+ patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
+ assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
+ assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
+ assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
+
+ // Second version should have the new set of tags
+ // TODO: We could copy these forward like we do for non-inline mode. Perhaps in the future.
+ patient = myPatientDao.read(new IdType("Patient/A/_history/2"), mySrd);
+ assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile2"));
+ assertThat(toTags(patient).toString(), toTags(patient), containsInAnyOrder("http://tag2|vtag2|dtag2"));
+ assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), containsInAnyOrder("http://sec2|vsec2|dsec2"));
+
+ }
+
+
+ @Test
+ public void testInlineTags_Search_Tag() {
+ myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
+
+ SearchParameter searchParameter = createSearchParameterForInlineTag();
+ ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
+ mySearchParameterDao.update(searchParameter, mySrd);
+ mySearchParamRegistry.forceRefresh();
+
+ createPatientsForInlineSearchTests();
+
+ logAllTokenIndexes();
+
+ // Perform a search
+ Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_tag").exactly().systemAndCode("http://tag1", "vtag1")).returnBundle(Bundle.class).execute();
+ assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
+
+ validatePatientSearchResultsForInlineTags(outcome);
+ }
+
+ @Test
+ public void testInlineTags_Search_Profile() {
+ myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
+
+ SearchParameter searchParameter = createSearchParameterForInlineProfile();
+ ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
+ mySearchParameterDao.update(searchParameter, mySrd);
+ mySearchParamRegistry.forceRefresh();
+
+ createPatientsForInlineSearchTests();
+
+ logAllTokenIndexes();
+
+ // Perform a search
+ Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_profile").exactly().code("http://profile1")).returnBundle(Bundle.class).execute();
+ assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
+ validatePatientSearchResultsForInlineTags(outcome);
+ }
+
+ @Test
+ public void testInlineTags_Search_Security() {
+ myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
+
+ SearchParameter searchParameter = createSearchParameterForInlineSecurity();
+ ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
+ mySearchParameterDao.update(searchParameter, mySrd);
+ mySearchParamRegistry.forceRefresh();
+
+ createPatientsForInlineSearchTests();
+
+ logAllTokenIndexes();
+
+ // Perform a search
+ Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_security").exactly().systemAndCode("http://sec1", "vsec1")).returnBundle(Bundle.class).execute();
+ assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
+
+ validatePatientSearchResultsForInlineTags(outcome);
+ }
+
+ private void validatePatientSearchResultsForInlineTags(Bundle outcome) {
+ Patient patient;
+ patient = (Patient) outcome.getEntry().get(0).getResource();
+ assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
+ assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
+ assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
+ patient = (Patient) outcome.getEntry().get(1).getResource();
+ assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
+ assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
+ assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
+ }
+
+ private void createPatientsForInlineSearchTests() {
+ Patient patient = new Patient();
+ patient.setId("Patient/A");
+ patient.getMeta().addProfile("http://profile1");
+ patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
+ patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
+ patient.setActive(true);
+ myPatientDao.update(patient, mySrd);
+
+ patient = new Patient();
+ patient.setId("Patient/B");
+ patient.getMeta().addProfile("http://profile1");
+ patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
+ patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
+ patient.setActive(true);
+ myPatientDao.update(patient, mySrd);
+
+ patient = new Patient();
+ patient.setId("Patient/NO");
+ patient.getMeta().addProfile("http://profile99");
+ patient.getMeta().addTag("http://tag99", "vtag99", "dtag99");
+ patient.getMeta().addSecurity("http://sec99", "vsec99", "dsec99");
+ patient.setActive(true);
+ myPatientDao.update(patient, mySrd);
+ }
+
+ @Nonnull
+ public static SearchParameter createSearchParameterForInlineTag() {
+ SearchParameter searchParameter = new SearchParameter();
+ searchParameter.setId("SearchParameter/resource-tag");
+ for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
+ searchParameter.addBase(next);
+ }
+ searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
+ searchParameter.setType(Enumerations.SearchParamType.TOKEN);
+ searchParameter.setCode("_tag");
+ searchParameter.setName("Tag");
+ searchParameter.setExpression("meta.tag");
+ return searchParameter;
+ }
+
+ @Nonnull
+ public static SearchParameter createSearchParameterForInlineSecurity() {
+ SearchParameter searchParameter = new SearchParameter();
+ searchParameter.setId("SearchParameter/resource-security");
+ for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
+ searchParameter.addBase(next);
+ }
+ searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
+ searchParameter.setType(Enumerations.SearchParamType.TOKEN);
+ searchParameter.setCode("_security");
+ searchParameter.setName("Security");
+ searchParameter.setExpression("meta.security");
+ return searchParameter;
+ }
+
+ @Nonnull
+ public static SearchParameter createSearchParameterForInlineProfile() {
+ SearchParameter searchParameter = new SearchParameter();
+ searchParameter.setId("SearchParameter/resource-profile");
+ for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
+ searchParameter.addBase(next);
+ }
+ searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
+ searchParameter.setType(Enumerations.SearchParamType.URI);
+ searchParameter.setCode("_profile");
+ searchParameter.setName("Profile");
+ searchParameter.setExpression("meta.profile");
+ return searchParameter;
+ }
+
+}
diff --git a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsTest.java b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsTest.java
index d6215c5a62d..28bf2e71f74 100644
--- a/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsTest.java
+++ b/hapi-fhir-jpaserver-test-r4/src/test/java/ca/uhn/fhir/jpa/dao/r4/FhirResourceDaoR4TagsTest.java
@@ -351,82 +351,6 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
}
- @Test
- public void testInlineTags_StoreAndRetrieve() {
- myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
-
- // Store a first version
- Patient patient = new Patient();
- patient.setId("Patient/A");
- patient.getMeta().addProfile("http://profile1");
- patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
- patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
- patient.setActive(true);
- myPatientDao.update(patient, mySrd);
-
- runInTransaction(() -> {
- assertEquals(0, myResourceTagDao.count());
- assertEquals(0, myResourceHistoryTagDao.count());
- assertEquals(0, myTagDefinitionDao.count());
- });
-
- // Read it back
- patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
- assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
- assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
- assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
-
- // Store a second version
- patient = new Patient();
- patient.setId("Patient/A");
- patient.getMeta().addProfile("http://profile2");
- patient.getMeta().addTag("http://tag2", "vtag2", "dtag2");
- patient.getMeta().addSecurity("http://sec2", "vsec2", "dsec2");
- patient.setActive(true);
- myPatientDao.update(patient, mySrd);
-
- runInTransaction(() -> {
- assertEquals(0, myResourceTagDao.count());
- assertEquals(0, myResourceHistoryTagDao.count());
- assertEquals(0, myTagDefinitionDao.count());
- });
-
- // First version should have only the initial tags
- patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
- assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
- assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
- assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
-
- // Second version should have the new set of tags
- // TODO: We could copy these forward like we do for non-inline mode. Perhaps in the future.
- patient = myPatientDao.read(new IdType("Patient/A/_history/2"), mySrd);
- assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile2"));
- assertThat(toTags(patient).toString(), toTags(patient), containsInAnyOrder("http://tag2|vtag2|dtag2"));
- assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), containsInAnyOrder("http://sec2|vsec2|dsec2"));
-
- }
-
-
- @Test
- public void testInlineTags_Search_Tag() {
- myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
-
- SearchParameter searchParameter = createResourceTagSearchParameter();
- ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
- mySearchParameterDao.update(searchParameter, mySrd);
- mySearchParamRegistry.forceRefresh();
-
- createPatientsForInlineSearchTests();
-
- logAllTokenIndexes();
-
- // Perform a search
- Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_tag").exactly().systemAndCode("http://tag1", "vtag1")).returnBundle(Bundle.class).execute();
- assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
-
- validatePatientSearchResultsForInlineTags(outcome);
- }
-
@Test
public void testMetaDelete_TagStorageModeNonVersioned_ShouldShowRemainingTagsInGetAllResources() {
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.NON_VERSIONED);
@@ -472,91 +396,7 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
assertEquals(1, patient.getMeta().getTag().size());
}
- @Test
- public void testInlineTags_Search_Profile() {
- myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
- SearchParameter searchParameter = createSearchParamForInlineResourceProfile();
- ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
- mySearchParameterDao.update(searchParameter, mySrd);
- mySearchParamRegistry.forceRefresh();
-
- createPatientsForInlineSearchTests();
-
- logAllTokenIndexes();
-
- // Perform a search
- Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_profile").exactly().code("http://profile1")).returnBundle(Bundle.class).execute();
- assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
- validatePatientSearchResultsForInlineTags(outcome);
- }
-
- @Test
- public void testInlineTags_Search_Security() {
- myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
-
- SearchParameter searchParameter = new SearchParameter();
- searchParameter.setId("SearchParameter/resource-security");
- for (String next : myFhirContext.getResourceTypes().stream().sorted().collect(Collectors.toList())) {
- searchParameter.addBase(next);
- }
- searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
- searchParameter.setType(Enumerations.SearchParamType.TOKEN);
- searchParameter.setCode("_security");
- searchParameter.setName("Security");
- searchParameter.setExpression("meta.security");
- ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
- mySearchParameterDao.update(searchParameter, mySrd);
- mySearchParamRegistry.forceRefresh();
-
- createPatientsForInlineSearchTests();
-
- logAllTokenIndexes();
-
- // Perform a search
- Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_security").exactly().systemAndCode("http://sec1", "vsec1")).returnBundle(Bundle.class).execute();
- assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
-
- validatePatientSearchResultsForInlineTags(outcome);
- }
-
- private void validatePatientSearchResultsForInlineTags(Bundle outcome) {
- Patient patient;
- patient = (Patient) outcome.getEntry().get(0).getResource();
- assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
- assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
- assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
- patient = (Patient) outcome.getEntry().get(1).getResource();
- assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
- assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
- assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
- }
-
- private void createPatientsForInlineSearchTests() {
- Patient patient = new Patient();
- patient.setId("Patient/A");
- patient.getMeta().addProfile("http://profile1");
- patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
- patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
- patient.setActive(true);
- myPatientDao.update(patient, mySrd);
-
- patient = new Patient();
- patient.setId("Patient/B");
- patient.getMeta().addProfile("http://profile1");
- patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
- patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
- patient.setActive(true);
- myPatientDao.update(patient, mySrd);
-
- patient = new Patient();
- patient.setId("Patient/NO");
- patient.getMeta().addProfile("http://profile99");
- patient.getMeta().addTag("http://tag99", "vtag99", "dtag99");
- patient.getMeta().addSecurity("http://sec99", "vsec99", "dsec99");
- patient.setActive(true);
- myPatientDao.update(patient, mySrd);
- }
private void initializeNonVersioned() {
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.NON_VERSIONED);
@@ -595,63 +435,33 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
}
@Nonnull
- private List<String> toTags(Patient patient) {
+ static List<String> toTags(Patient patient) {
return toTags(patient.getMeta());
}
@Nonnull
- private List<String> toSecurityLabels(Patient patient) {
+ static List<String> toSecurityLabels(Patient patient) {
return toSecurityLabels(patient.getMeta());
}
@Nonnull
- private List<String> toProfiles(Patient patient) {
+ static List<String> toProfiles(Patient patient) {
return toProfiles(patient.getMeta());
}
@Nonnull
- private static List<String> toTags(Meta meta) {
+ static List<String> toTags(Meta meta) {
return meta.getTag().stream().map(t -> t.getSystem() + "|" + t.getCode() + "|" + t.getDisplay()).collect(Collectors.toList());
}
@Nonnull
- private static List<String> toSecurityLabels(Meta meta) {
+ static List<String> toSecurityLabels(Meta meta) {
return meta.getSecurity().stream().map(t -> t.getSystem() + "|" + t.getCode() + "|" + t.getDisplay()).collect(Collectors.toList());
}
@Nonnull
- private static List<String> toProfiles(Meta meta) {
+ static List<String> toProfiles(Meta meta) {
return meta.getProfile().stream().map(t -> t.getValue()).collect(Collectors.toList());
}
- @Nonnull
- public static SearchParameter createSearchParamForInlineResourceProfile() {
- SearchParameter searchParameter = new SearchParameter();
- searchParameter.setId("SearchParameter/resource-profile");
- for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().collect(Collectors.toList())) {
- searchParameter.addBase(next);
- }
- searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
- searchParameter.setType(Enumerations.SearchParamType.URI);
- searchParameter.setCode("_profile");
- searchParameter.setName("Profile");
- searchParameter.setExpression("meta.profile");
- return searchParameter;
- }
-
- @Nonnull
- public static SearchParameter createResourceTagSearchParameter() {
- SearchParameter searchParameter = new SearchParameter();
- searchParameter.setId("SearchParameter/resource-tag");
- for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().collect(Collectors.toList())) {
- searchParameter.addBase(next);
- }
- searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
- searchParameter.setType(Enumerations.SearchParamType.TOKEN);
- searchParameter.setCode("_tag");
- searchParameter.setName("Tag");
- searchParameter.setExpression("meta.tag");
- return searchParameter;
- }
-
}
diff --git a/hapi-fhir-jpaserver-test-r4b/pom.xml b/hapi-fhir-jpaserver-test-r4b/pom.xml
index ef78168c1eb..b4411fa2569 100644
--- a/hapi-fhir-jpaserver-test-r4b/pom.xml
+++ b/hapi-fhir-jpaserver-test-r4b/pom.xml
@@ -6,7 +6,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-jpaserver-test-r5/pom.xml b/hapi-fhir-jpaserver-test-r5/pom.xml
index a1c05294df3..154cf2025c7 100644
--- a/hapi-fhir-jpaserver-test-r5/pom.xml
+++ b/hapi-fhir-jpaserver-test-r5/pom.xml
@@ -6,7 +6,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-jpaserver-test-utilities/pom.xml b/hapi-fhir-jpaserver-test-utilities/pom.xml
index a3218db7a11..4fe444f1824 100644
--- a/hapi-fhir-jpaserver-test-utilities/pom.xml
+++ b/hapi-fhir-jpaserver-test-utilities/pom.xml
@@ -6,7 +6,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-jpaserver-uhnfhirtest/pom.xml b/hapi-fhir-jpaserver-uhnfhirtest/pom.xml
index 2210ffee64c..7ed92bb0782 100644
--- a/hapi-fhir-jpaserver-uhnfhirtest/pom.xml
+++ b/hapi-fhir-jpaserver-uhnfhirtest/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-fhir-server-mdm/pom.xml b/hapi-fhir-server-mdm/pom.xml
index 75ec956b7b0..45300ada6d6 100644
--- a/hapi-fhir-server-mdm/pom.xml
+++ b/hapi-fhir-server-mdm/pom.xml
@@ -7,7 +7,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-server-openapi/pom.xml b/hapi-fhir-server-openapi/pom.xml
index 1da7b23540f..608a290b634 100644
--- a/hapi-fhir-server-openapi/pom.xml
+++ b/hapi-fhir-server-openapi/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-server/pom.xml b/hapi-fhir-server/pom.xml
index 6b27590cf16..dd33fabc19f 100644
--- a/hapi-fhir-server/pom.xml
+++ b/hapi-fhir-server/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-serviceloaders/hapi-fhir-caching-api/pom.xml b/hapi-fhir-serviceloaders/hapi-fhir-caching-api/pom.xml
index d122dab6e93..5c6c01652dc 100644
--- a/hapi-fhir-serviceloaders/hapi-fhir-caching-api/pom.xml
+++ b/hapi-fhir-serviceloaders/hapi-fhir-caching-api/pom.xml
@@ -7,7 +7,7 @@
<artifactId>hapi-fhir-serviceloaders</artifactId>
<groupId>ca.uhn.hapi.fhir</groupId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-fhir-serviceloaders/hapi-fhir-caching-caffeine/pom.xml b/hapi-fhir-serviceloaders/hapi-fhir-caching-caffeine/pom.xml
index 0df23875bfc..ab128412090 100644
--- a/hapi-fhir-serviceloaders/hapi-fhir-caching-caffeine/pom.xml
+++ b/hapi-fhir-serviceloaders/hapi-fhir-caching-caffeine/pom.xml
@@ -7,7 +7,7 @@
<artifactId>hapi-fhir-serviceloaders</artifactId>
<groupId>ca.uhn.hapi.fhir</groupId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
@@ -20,7 +20,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-caching-api</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<groupId>com.github.ben-manes.caffeine</groupId>
diff --git a/hapi-fhir-serviceloaders/hapi-fhir-caching-guava/pom.xml b/hapi-fhir-serviceloaders/hapi-fhir-caching-guava/pom.xml
index 2fb333dde90..13dbdf0697e 100644
--- a/hapi-fhir-serviceloaders/hapi-fhir-caching-guava/pom.xml
+++ b/hapi-fhir-serviceloaders/hapi-fhir-caching-guava/pom.xml
@@ -7,7 +7,7 @@
<artifactId>hapi-fhir-serviceloaders</artifactId>
<groupId>ca.uhn.hapi.fhir</groupId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-fhir-serviceloaders/hapi-fhir-caching-testing/pom.xml b/hapi-fhir-serviceloaders/hapi-fhir-caching-testing/pom.xml
index 77a27229c6d..196b3a6757e 100644
--- a/hapi-fhir-serviceloaders/hapi-fhir-caching-testing/pom.xml
+++ b/hapi-fhir-serviceloaders/hapi-fhir-caching-testing/pom.xml
@@ -7,7 +7,7 @@
<artifactId>hapi-fhir</artifactId>
<groupId>ca.uhn.hapi.fhir</groupId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
diff --git a/hapi-fhir-serviceloaders/pom.xml b/hapi-fhir-serviceloaders/pom.xml
index c2a06959d99..e8be60f8273 100644
--- a/hapi-fhir-serviceloaders/pom.xml
+++ b/hapi-fhir-serviceloaders/pom.xml
@@ -5,7 +5,7 @@
<artifactId>hapi-deployable-pom</artifactId>
<groupId>ca.uhn.hapi.fhir</groupId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-autoconfigure/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-autoconfigure/pom.xml
index 099d0e798ea..c4f8f1a7535 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-autoconfigure/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-autoconfigure/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-apache/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-apache/pom.xml
index 9956d3af23b..f9a4bef65f1 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-apache/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-apache/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<artifactId>hapi-fhir-spring-boot-sample-client-apache</artifactId>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-okhttp/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-okhttp/pom.xml
index c81865b1ea5..75c44871d18 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-okhttp/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-client-okhttp/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<artifactId>hapi-fhir-spring-boot-sample-client-okhttp</artifactId>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-server-jersey/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-server-jersey/pom.xml
index 054d4509d80..21d60802c9d 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-server-jersey/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/hapi-fhir-spring-boot-sample-server-jersey/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<artifactId>hapi-fhir-spring-boot-sample-server-jersey</artifactId>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/pom.xml
index 3a26afd6f38..eca4d212e9c 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-samples/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-spring-boot</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
diff --git a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-starter/pom.xml b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-starter/pom.xml
index b1ebf3cd1fd..ef15f0782c9 100644
--- a/hapi-fhir-spring-boot/hapi-fhir-spring-boot-starter/pom.xml
+++ b/hapi-fhir-spring-boot/hapi-fhir-spring-boot-starter/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-spring-boot/pom.xml b/hapi-fhir-spring-boot/pom.xml
index c74a0426606..9f384e70a18 100644
--- a/hapi-fhir-spring-boot/pom.xml
+++ b/hapi-fhir-spring-boot/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-fhir-sql-migrate/pom.xml b/hapi-fhir-sql-migrate/pom.xml
index eea1668033d..c0c62526835 100644
--- a/hapi-fhir-sql-migrate/pom.xml
+++ b/hapi-fhir-sql-migrate/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-storage-batch2-jobs/pom.xml b/hapi-fhir-storage-batch2-jobs/pom.xml
index 253ebd96f0d..2436e24d318 100644
--- a/hapi-fhir-storage-batch2-jobs/pom.xml
+++ b/hapi-fhir-storage-batch2-jobs/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
<modelVersion>4.0.0</modelVersion>
diff --git a/hapi-fhir-storage-batch2-jobs/src/main/java/ca/uhn/fhir/batch2/jobs/export/BulkExportCreateReportStep.java b/hapi-fhir-storage-batch2-jobs/src/main/java/ca/uhn/fhir/batch2/jobs/export/BulkExportCreateReportStep.java
index be03d07e1f9..c02353aadb5 100644
--- a/hapi-fhir-storage-batch2-jobs/src/main/java/ca/uhn/fhir/batch2/jobs/export/BulkExportCreateReportStep.java
+++ b/hapi-fhir-storage-batch2-jobs/src/main/java/ca/uhn/fhir/batch2/jobs/export/BulkExportCreateReportStep.java
@@ -57,13 +57,13 @@ public class BulkExportCreateReportStep implements IReductionStepWorker {
@@ -71,6 +81,9 @@ public class ExpandResourcesStep implements IJobStepWorker
List<String> resources = encodeToString(allResources, jobParameters);
// set to datasink
@@ -125,12 +138,46 @@ public class ExpandResourcesStep implements IJobStepWorker
private List<IBaseResource> fetchAllResources(ResourceIdList theIds) {
- List<IBaseResource> resources = new ArrayList<>();
+ ArrayListMultimap<String, String> typeToIds = ArrayListMultimap.create();
+ theIds.getIds().forEach(t -> typeToIds.put(t.getResourceType(), t.getId()));
+
+ List<IBaseResource> resources = new ArrayList<>(theIds.getIds().size());
+
+ for (String resourceType : typeToIds.keySet()) {
+
+ IFhirResourceDao<?> dao = myDaoRegistry.getResourceDao(resourceType);
+ List<String> allIds = typeToIds.get(resourceType);
+ while (!allIds.isEmpty()) {
+
+ // Load in batches in order to avoid having too many PIDs go into a
+ // single SQL statement at once
+ int batchSize = Math.min(500, allIds.size());
+
+ Set<IResourcePersistentId> nextBatchOfPids =
+ allIds
+ .subList(0, batchSize)
+ .stream()
+ .map(t -> myIdHelperService.newPidFromStringIdAndResourceName(t, resourceType))
+ .collect(Collectors.toSet());
+ allIds = allIds.subList(batchSize, allIds.size());
+
+ PersistentIdToForcedIdMap nextBatchOfResourceIds = myTransactionService
+ .withRequest(null)
+ .execute(() -> myIdHelperService.translatePidsToForcedIds(nextBatchOfPids));
+
+ TokenOrListParam idListParam = new TokenOrListParam();
+ for (IResourcePersistentId nextPid : nextBatchOfPids) {
+ Optional<String> resourceId = nextBatchOfResourceIds.get(nextPid);
+ idListParam.add(resourceId.orElse(nextPid.getId().toString()));
+ }
+
+ SearchParameterMap spMap = SearchParameterMap
+ .newSynchronous()
+ .add(PARAM_ID, idListParam);
+ IBundleProvider outcome = dao.search(spMap, new SystemRequestDetails());
+ resources.addAll(outcome.getAllResources());
+ }
- for (BatchResourceId batchResourceId : theIds.getIds()) {
- IFhirResourceDao<?> dao = myDaoRegistry.getResourceDao(batchResourceId.getResourceType());
- // This should be a query, but we have PIDs, and we don't have a _pid search param. TODO GGG, figure out how to make this search by pid.
- resources.add(dao.readByPid(myIdHelperService.newPidFromStringIdAndResourceName(batchResourceId.getId(), batchResourceId.getResourceType())));
}
return resources;
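
The rewritten fetchAllResources() above groups the chunk's IDs by resource type and loads each type through synchronous searches capped at 500 IDs per query, instead of issuing one readByPid() call per resource. A simplified standalone sketch of the same batching idea follows; the loader class and its parameters are illustrative stand-ins, not the actual HAPI DAO API:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;

public class BatchedResourceLoader {

	private static final int BATCH_SIZE = 500;

	/**
	 * Groups the given (resourceType, id) pairs by type and loads each type in
	 * batches of at most BATCH_SIZE ids, so no single query receives an
	 * unbounded IN-list. The "search" function is a stand-in for a synchronous
	 * _id search against the appropriate DAO.
	 */
	public static <T> List<T> loadInBatches(
			List<Map.Entry<String, String>> typeAndIds,
			BiFunction<String, List<String>, List<T>> search) {

		// Partition the incoming ids by resource type
		Map<String, List<String>> typeToIds = new HashMap<>();
		for (Map.Entry<String, String> next : typeAndIds) {
			typeToIds.computeIfAbsent(next.getKey(), k -> new ArrayList<>()).add(next.getValue());
		}

		List<T> resources = new ArrayList<>(typeAndIds.size());
		for (Map.Entry<String, List<String>> entry : typeToIds.entrySet()) {
			List<String> allIds = entry.getValue();
			for (int from = 0; from < allIds.size(); from += BATCH_SIZE) {
				int to = Math.min(from + BATCH_SIZE, allIds.size());
				// One query per batch instead of one read per resource
				resources.addAll(search.apply(entry.getKey(), allIds.subList(from, to)));
			}
		}
		return resources;
	}
}
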
diff --git a/hapi-fhir-storage-batch2-jobs/src/test/java/ca/uhn/fhir/batch2/jobs/export/ExpandResourcesStepTest.java b/hapi-fhir-storage-batch2-jobs/src/test/java/ca/uhn/fhir/batch2/jobs/export/ExpandResourcesStepTest.java
index 1994ca85a81..85e4e28e2ab 100644
--- a/hapi-fhir-storage-batch2-jobs/src/test/java/ca/uhn/fhir/batch2/jobs/export/ExpandResourcesStepTest.java
+++ b/hapi-fhir-storage-batch2-jobs/src/test/java/ca/uhn/fhir/batch2/jobs/export/ExpandResourcesStepTest.java
@@ -12,12 +12,17 @@ import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.context.FhirContext;
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
+import ca.uhn.fhir.jpa.api.model.PersistentIdToForcedIdMap;
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
import ca.uhn.fhir.jpa.bulk.export.api.IBulkExportProcessor;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
+import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
import ca.uhn.fhir.jpa.model.dao.JpaPid;
import ca.uhn.fhir.jpa.model.entity.ModelConfig;
import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
import ca.uhn.fhir.rest.api.server.storage.BaseResourcePersistentId;
+import ca.uhn.fhir.rest.api.server.storage.IResourcePersistentId;
+import ca.uhn.fhir.rest.server.SimpleBundleProvider;
import ca.uhn.fhir.rest.server.interceptor.ResponseTerminologyTranslationSvc;
import org.hl7.fhir.instance.model.api.IBaseResource;
import org.hl7.fhir.r4.model.Patient;
@@ -32,6 +37,10 @@ import org.mockito.junit.jupiter.MockitoExtension;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -62,6 +71,9 @@ public class ExpandResourcesStepTest {
@Spy
private ModelConfig myModelConfig = new ModelConfig();
+ @Spy
+ private IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();
+
@InjectMocks
private ExpandResourcesStep mySecondStep;
@@ -122,9 +134,17 @@ public class ExpandResourcesStepTest {
createParameters(),
instance
);
- ArrayList clone = new ArrayList<>(resources);
- when(patientDao.readByPid(any(BaseResourcePersistentId.class))).thenAnswer(i -> clone.remove(0));
+ when(patientDao.search(any(), any())).thenReturn(new SimpleBundleProvider(resources));
when(myIdHelperService.newPidFromStringIdAndResourceName(anyString(), anyString())).thenReturn(JpaPid.fromId(1L));
+ when(myIdHelperService.translatePidsToForcedIds(any())).thenAnswer(t->{
+ Set<IResourcePersistentId<?>> inputSet = t.getArgument(0, Set.class);
+ Map<IResourcePersistentId<?>, Optional<String>> map = new HashMap<>();
+ for (var next : inputSet) {
+ map.put(next, Optional.empty());
+ }
+ return new PersistentIdToForcedIdMap<>(map);
+ });
+
// test
RunOutcome outcome = mySecondStep.run(input, sink);
diff --git a/hapi-fhir-storage-batch2/pom.xml b/hapi-fhir-storage-batch2/pom.xml
index 6e67be922bb..eca59445a7d 100644
--- a/hapi-fhir-storage-batch2/pom.xml
+++ b/hapi-fhir-storage-batch2/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhir
hapi-deployable-pom
- 6.4.2
+ 6.4.2-SNAPSHOT
../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobInstance.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobInstance.java
index 79278fb1f16..dcbcda32533 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobInstance.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobInstance.java
@@ -20,7 +20,6 @@ package ca.uhn.fhir.batch2.api;
* #L%
*/
-import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.StatusEnum;
import java.util.Date;
@@ -56,8 +55,6 @@ public interface IJobInstance {
String getErrorMessage();
- JobDefinition<?> getJobDefinition();
-
boolean isCancelled();
String getReport();
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobPersistence.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobPersistence.java
index b4a4d838ad5..6eca71408b6 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobPersistence.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IJobPersistence.java
@@ -155,7 +155,7 @@ public interface IJobPersistence {
* @param theChunkId The chunk ID
* @param theRecordsProcessed The number of records completed during chunk processing
*/
- void markWorkChunkAsCompletedAndClearData(String theChunkId, int theRecordsProcessed);
+ void markWorkChunkAsCompletedAndClearData(String theInstanceId, String theChunkId, int theRecordsProcessed);
/**
* Marks all work chunks with the provided status and erases the data
@@ -236,7 +236,7 @@ public interface IJobPersistence {
*
* @param theInstanceId The instance ID
*/
- void deleteChunks(String theInstanceId);
+ void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId);
/**
* Marks an instance as being complete
@@ -257,4 +257,6 @@ public interface IJobPersistence {
JobOperationResultJson cancelInstance(String theInstanceId);
List<String> fetchallchunkidsforstepWithStatus(String theInstanceId, String theStepId, StatusEnum theStatusEnum);
+
+ void updateInstanceUpdateTime(String theInstanceId);
}
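
Reading the new and renamed IJobPersistence methods literally, updateInstanceUpdateTime() only refreshes the instance's last-update timestamp (used as a heartbeat while a long reduction is running), and deleteChunksAndMarkInstanceAsChunksPurged() couples chunk deletion with the "chunks purged" flag. The toy in-memory store below only illustrates those assumed semantics; it is not the JPA-backed implementation:

import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

// Toy stand-ins for the real JobInstance / WorkChunk entities
class InMemoryInstance {
	volatile Date myUpdateTime;
	volatile boolean myWorkChunksPurged;
	final List<String> myChunkIds = new CopyOnWriteArrayList<>();
}

class InMemoryJobStore {
	private final Map<String, InMemoryInstance> myInstances = new ConcurrentHashMap<>();

	// Heartbeat hook: only refreshes the "last touched" timestamp so that a
	// long-running FINALIZE phase is not mistaken for a stalled job.
	void updateInstanceUpdateTime(String theInstanceId) {
		myInstances.get(theInstanceId).myUpdateTime = new Date();
	}

	// Renamed operation: removing the chunks and flagging the instance happen
	// together, so callers cannot forget one half of the cleanup.
	void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId) {
		InMemoryInstance instance = myInstances.get(theInstanceId);
		instance.myChunkIds.clear();
		instance.myWorkChunksPurged = true;
	}
}
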
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IReductionStepExecutorService.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IReductionStepExecutorService.java
new file mode 100644
index 00000000000..fc3026a364a
--- /dev/null
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/api/IReductionStepExecutorService.java
@@ -0,0 +1,29 @@
+package ca.uhn.fhir.batch2.api;
+
+/*-
+ * #%L
+ * HAPI FHIR JPA Server - Batch2 Task Processor
+ * %%
+ * Copyright (C) 2014 - 2023 Smile CDR, Inc.
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * #L%
+ */
+
+import ca.uhn.fhir.batch2.model.JobWorkCursor;
+
+public interface IReductionStepExecutorService {
+ void triggerReductionStep(String theInstanceId, JobWorkCursor<?, ?, ?> theJobWorkCursor);
+
+ void reducerPass();
+}
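
As wired elsewhere in this change, the intended call pattern for this interface is that the work-notification/maintenance side calls triggerReductionStep() when a gated reduction step becomes runnable, while a scheduled job periodically calls reducerPass() as a catch-up. The caller class below is a hypothetical illustration of that contract, not part of the HAPI codebase:

import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
import ca.uhn.fhir.batch2.model.JobWorkCursor;

class ReductionTriggerExample {
	private final IReductionStepExecutorService myReductionService;

	ReductionTriggerExample(IReductionStepExecutorService theReductionService) {
		myReductionService = theReductionService;
	}

	// Called when all chunks feeding the reduction step have completed
	void onReductionStepReady(String theInstanceId, JobWorkCursor<?, ?, ?> theCursor) {
		// Non-blocking: the service queues the instance and runs it on its own thread
		myReductionService.triggerReductionStep(theInstanceId, theCursor);
	}

	// Called from a scheduled job as a safety net for missed or raced triggers
	void onScheduledTick() {
		myReductionService.reducerPass();
	}
}
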
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/config/BaseBatch2Config.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/config/BaseBatch2Config.java
index a781bb75f6b..7614ae24741 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/config/BaseBatch2Config.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/config/BaseBatch2Config.java
@@ -23,13 +23,16 @@ package ca.uhn.fhir.batch2.config;
import ca.uhn.fhir.batch2.api.IJobCoordinator;
import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
import ca.uhn.fhir.batch2.channel.BatchJobSender;
import ca.uhn.fhir.batch2.coordinator.JobCoordinatorImpl;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
+import ca.uhn.fhir.batch2.coordinator.ReductionStepExecutorServiceImpl;
import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
import ca.uhn.fhir.batch2.maintenance.JobMaintenanceServiceImpl;
import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import ca.uhn.fhir.jpa.model.sched.ISchedulerService;
import ca.uhn.fhir.jpa.subscription.channel.api.ChannelConsumerSettings;
import ca.uhn.fhir.jpa.subscription.channel.api.ChannelProducerSettings;
@@ -39,7 +42,6 @@ import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import org.springframework.transaction.PlatformTransactionManager;
@Configuration
public abstract class BaseBatch2Config {
@@ -57,8 +59,8 @@ public abstract class BaseBatch2Config {
}
@Bean
- public WorkChunkProcessor jobStepExecutorService(BatchJobSender theBatchJobSender, PlatformTransactionManager theTransactionManager) {
- return new WorkChunkProcessor(myPersistence, theBatchJobSender, theTransactionManager);
+ public WorkChunkProcessor jobStepExecutorService(BatchJobSender theBatchJobSender, IHapiTransactionService theTransactionService) {
+ return new WorkChunkProcessor(myPersistence, theBatchJobSender, theTransactionService);
}
@Bean
@@ -80,20 +82,28 @@ public abstract class BaseBatch2Config {
theJobMaintenanceService);
}
+ @Bean
+ public IReductionStepExecutorService reductionStepExecutorService(IJobPersistence theJobPersistence,
+ IHapiTransactionService theTransactionService,
+ JobDefinitionRegistry theJobDefinitionRegistry) {
+ return new ReductionStepExecutorServiceImpl(theJobPersistence, theTransactionService, theJobDefinitionRegistry);
+ }
+
@Bean
public IJobMaintenanceService batch2JobMaintenanceService(ISchedulerService theSchedulerService,
JobDefinitionRegistry theJobDefinitionRegistry,
DaoConfig theDaoConfig,
BatchJobSender theBatchJobSender,
- WorkChunkProcessor theExecutor
+ WorkChunkProcessor theExecutor,
+ IReductionStepExecutorService theReductionStepExecutorService
) {
return new JobMaintenanceServiceImpl(theSchedulerService,
myPersistence,
theDaoConfig,
theJobDefinitionRegistry,
theBatchJobSender,
- theExecutor
- );
+ theExecutor,
+ theReductionStepExecutorService);
}
@Bean
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImpl.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImpl.java
index 53d4888fd1e..fc6b8d270c4 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImpl.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImpl.java
@@ -33,11 +33,11 @@ import ca.uhn.fhir.batch2.model.JobWorkNotification;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.models.JobInstanceFetchRequest;
import ca.uhn.fhir.i18n.Msg;
-import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
+import ca.uhn.fhir.util.Logs;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
import org.springframework.data.domain.Page;
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobDefinitionRegistry.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobDefinitionRegistry.java
index 32eeff03d0f..609b6dd0db1 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobDefinitionRegistry.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobDefinitionRegistry.java
@@ -169,8 +169,9 @@ public class JobDefinitionRegistry {
return getJobDefinition(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
}
- public JobDefinition<?> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
- return getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
+ @SuppressWarnings("unchecked")
+ public <PT extends IModelJson> JobDefinition<PT> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
+ return (JobDefinition<PT>) getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
}
public Collection getJobDefinitionVersions(String theDefinitionId) {
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutor.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutor.java
index 897ce6b86bd..e0bae4cdd16 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutor.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutor.java
@@ -55,7 +55,9 @@ public class JobStepExecutor theCursor,
- @Nonnull WorkChunkProcessor theExecutor, IJobMaintenanceService theJobMaintenanceService) {
+ @Nonnull WorkChunkProcessor theExecutor,
+ @Nonnull IJobMaintenanceService theJobMaintenanceService,
+ @Nonnull JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
myBatchJobSender = theBatchJobSender;
myDefinition = theCursor.jobDefinition;
@@ -65,11 +67,11 @@ public class JobStepExecutor stepExecutorOutput = myJobExecutorSvc.doExecution(
myCursor,
myInstance,
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutorFactory.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutorFactory.java
index 4b5ba0e48d1..d6cef77975c 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutorFactory.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/JobStepExecutorFactory.java
@@ -35,18 +35,21 @@ public class JobStepExecutorFactory {
private final BatchJobSender myBatchJobSender;
private final WorkChunkProcessor myJobStepExecutorSvc;
private final IJobMaintenanceService myJobMaintenanceService;
+ private final JobDefinitionRegistry myJobDefinitionRegistry;
public JobStepExecutorFactory(@Nonnull IJobPersistence theJobPersistence,
@Nonnull BatchJobSender theBatchJobSender,
@Nonnull WorkChunkProcessor theExecutorSvc,
- @Nonnull IJobMaintenanceService theJobMaintenanceService) {
+ @Nonnull IJobMaintenanceService theJobMaintenanceService,
+ @Nonnull JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
myBatchJobSender = theBatchJobSender;
myJobStepExecutorSvc = theExecutorSvc;
myJobMaintenanceService = theJobMaintenanceService;
+ myJobDefinitionRegistry = theJobDefinitionRegistry;
}
public JobStepExecutor newJobStepExecutor(@Nonnull JobInstance theInstance, WorkChunk theWorkChunk, @Nonnull JobWorkCursor theCursor) {
- return new JobStepExecutor<>(myJobPersistence, myBatchJobSender, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService);
+ return new JobStepExecutor<>(myJobPersistence, myBatchJobSender, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
}
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSink.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSink.java
index 0c8e790182e..99cd833e3bc 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSink.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSink.java
@@ -22,14 +22,16 @@ package ca.uhn.fhir.batch2.coordinator;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
-import ca.uhn.fhir.batch2.model.JobDefinition;
+import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobWorkCursor;
+import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunkData;
+import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
import ca.uhn.fhir.i18n.Msg;
-import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.JsonUtil;
+import ca.uhn.fhir.util.Logs;
import org.slf4j.Logger;
import java.util.Optional;
@@ -39,13 +41,15 @@ public class ReductionStepDataSink theJobWorkCursor,
- JobDefinition theDefinition,
- IJobPersistence thePersistence) {
+ public ReductionStepDataSink(String theInstanceId,
+ JobWorkCursor theJobWorkCursor,
+ IJobPersistence thePersistence,
+ JobDefinitionRegistry theJobDefinitionRegistry) {
super(theInstanceId, theJobWorkCursor);
myJobPersistence = thePersistence;
+ myJobDefinitionRegistry = theJobDefinitionRegistry;
}
@Override
@@ -57,15 +61,27 @@ public class ReductionStepDataSink JsonUtil.serialize(instance))
+ .log("New instance state: {}");
+
myJobPersistence.updateInstance(instance);
+
+ ourLog.info("Finalized job instance {} with report length {} chars", instance.getInstanceId(), dataString.length());
+
} else {
String msg = "No instance found with Id " + instanceId;
ourLog.error(msg);
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutor.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutor.java
deleted file mode 100644
index 4b4a88dbc74..00000000000
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutor.java
+++ /dev/null
@@ -1,177 +0,0 @@
-package ca.uhn.fhir.batch2.coordinator;
-
-/*-
- * #%L
- * HAPI FHIR JPA Server - Batch2 Task Processor
- * %%
- * Copyright (C) 2014 - 2023 Smile CDR, Inc.
- * %%
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * #L%
- */
-
-import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
-import ca.uhn.fhir.batch2.api.IJobPersistence;
-import ca.uhn.fhir.batch2.api.IReductionStepWorker;
-import ca.uhn.fhir.batch2.model.ChunkOutcome;
-import ca.uhn.fhir.batch2.model.JobDefinitionStep;
-import ca.uhn.fhir.batch2.model.JobInstance;
-import ca.uhn.fhir.batch2.model.StatusEnum;
-import ca.uhn.fhir.batch2.model.WorkChunk;
-import ca.uhn.fhir.model.api.IModelJson;
-import ca.uhn.fhir.util.Logs;
-import org.slf4j.Logger;
-import org.springframework.transaction.PlatformTransactionManager;
-import org.springframework.transaction.TransactionDefinition;
-import org.springframework.transaction.support.TransactionTemplate;
-
-import java.util.stream.Stream;
-
-public class ReductionStepExecutor {
- private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
- private final IJobPersistence myJobPersistence;
- private final PlatformTransactionManager myTxManager;
- private final TransactionTemplate myTxTemplate;
-
- public ReductionStepExecutor(IJobPersistence theJobPersistence, PlatformTransactionManager theTransactionManager) {
- myJobPersistence = theJobPersistence;
- myTxManager = theTransactionManager;
- myTxTemplate = new TransactionTemplate(theTransactionManager);
- myTxTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
- }
-
- /**
- * Do work and construct execution details for job reduction step
- */
- boolean executeReductionStep(
- JobInstance theInstance,
- JobDefinitionStep theStep,
- Class theInputType,
- PT theParameters
- ) {
- IReductionStepWorker reductionStepWorker = (IReductionStepWorker) theStep.getJobStepWorker();
-
- if (!myJobPersistence.markInstanceAsStatus(theInstance.getInstanceId(), StatusEnum.FINALIZE)) {
- ourLog.warn(
- "JobInstance[{}] is already in FINALIZE state. In memory status is {}. Reduction step will not rerun!"
- + " This could be a long running reduction job resulting in the processed msg not being acknowledge,"
- + " or the result of a failed process or server restarting.",
- theInstance.getInstanceId(),
- theInstance.getStatus().name()
- );
- return false;
- }
- theInstance.setStatus(StatusEnum.FINALIZE);
-
- boolean defaultSuccessValue = true;
- ReductionStepChunkProcessingResponse response = new ReductionStepChunkProcessingResponse(defaultSuccessValue);
-
- try {
- myTxTemplate.executeWithoutResult((status) -> {
- try(Stream chunkIterator2 = myJobPersistence.fetchAllWorkChunksForStepStream(theInstance.getInstanceId(), theStep.getStepId())) {
- chunkIterator2.forEach((chunk) -> {
- processChunk(chunk, theInstance, theInputType, theParameters, reductionStepWorker, response);
- });
- }
- });
- } finally {
-
- if (response.hasSuccessfulChunksIds()) {
- // complete the steps without making a new work chunk
- myJobPersistence.markWorkChunksWithStatusAndWipeData(theInstance.getInstanceId(),
- response.getSuccessfulChunkIds(),
- StatusEnum.COMPLETED,
- null // error message - none
- );
- }
-
- if (response.hasFailedChunkIds()) {
- // mark any failed chunks as failed for aborting
- myJobPersistence.markWorkChunksWithStatusAndWipeData(theInstance.getInstanceId(),
- response.getFailedChunksIds(),
- StatusEnum.FAILED,
- "JOB ABORTED");
- }
-
- }
-
- // if no successful chunks, return false
- if (!response.hasSuccessfulChunksIds()) {
- response.setSuccessful(false);
- }
-
- return response.isSuccessful();
- }
-
- private
- void processChunk(WorkChunk theChunk,
- JobInstance theInstance,
- Class theInputType,
- PT theParameters,
- IReductionStepWorker theReductionStepWorker,
- ReductionStepChunkProcessingResponse theResponseObject){
-
- if (!theChunk.getStatus().isIncomplete()) {
- // This should never happen since jobs with reduction are required to be gated
- ourLog.error("Unexpected chunk {} with status {} found while reducing {}. No chunks feeding into a reduction step should be complete.", theChunk.getId(), theChunk.getStatus(), theInstance);
- return;
- }
-
- if (theResponseObject.hasFailedChunkIds()) {
- // we are going to fail all future chunks now
- theResponseObject.addFailedChunkId(theChunk);
- } else {
- try {
- // feed them into our reduction worker
- // this is the most likely area to throw,
- // as this is where db actions and processing is likely to happen
- ChunkExecutionDetails chunkDetails = new ChunkExecutionDetails<>(theChunk.getData(theInputType), theParameters, theInstance.getInstanceId(), theChunk.getId());
-
- ChunkOutcome outcome = theReductionStepWorker.consume(chunkDetails);
-
- switch (outcome.getStatus()) {
- case SUCCESS:
- theResponseObject.addSuccessfulChunkId(theChunk);
- break;
-
- case ABORT:
- ourLog.error("Processing of work chunk {} resulted in aborting job.", theChunk.getId());
-
- // fail entire job - including all future workchunks
- theResponseObject.addFailedChunkId(theChunk);
- theResponseObject.setSuccessful(false);
- break;
-
- case FAIL:
- // non-idempotent; but failed chunks will be
- // ignored on a second runthrough of reduction step
- myJobPersistence.markWorkChunkAsFailed(theChunk.getId(),
- "Step worker failed to process work chunk " + theChunk.getId());
- theResponseObject.setSuccessful(false);
- break;
- }
- } catch (Exception e) {
- String msg = String.format(
- "Reduction step failed to execute chunk reduction for chunk %s with exception: %s.",
- theChunk.getId(),
- e.getMessage()
- );
- // we got a failure in a reduction
- ourLog.error(msg, e);
- theResponseObject.setSuccessful(false);
-
- myJobPersistence.markWorkChunkAsFailed(theChunk.getId(), msg);
- }
- }
- }
-}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImpl.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImpl.java
new file mode 100644
index 00000000000..519fb42f7d2
--- /dev/null
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImpl.java
@@ -0,0 +1,338 @@
+package ca.uhn.fhir.batch2.coordinator;
+
+/*-
+ * #%L
+ * HAPI FHIR JPA Server - Batch2 Task Processor
+ * %%
+ * Copyright (C) 2014 - 2023 Smile CDR, Inc.
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * #L%
+ */
+
+import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
+import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
+import ca.uhn.fhir.batch2.api.IReductionStepWorker;
+import ca.uhn.fhir.batch2.api.StepExecutionDetails;
+import ca.uhn.fhir.batch2.model.ChunkOutcome;
+import ca.uhn.fhir.batch2.model.JobDefinitionStep;
+import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.JobWorkCursor;
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunk;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
+import ca.uhn.fhir.jpa.model.sched.HapiJob;
+import ca.uhn.fhir.jpa.model.sched.IHasScheduledJobs;
+import ca.uhn.fhir.jpa.model.sched.ISchedulerService;
+import ca.uhn.fhir.jpa.model.sched.ScheduledJobDefinition;
+import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.time.DateUtils;
+import org.quartz.JobExecutionContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.event.ContextClosedEvent;
+import org.springframework.context.event.ContextRefreshedEvent;
+import org.springframework.context.event.EventListener;
+import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
+import org.springframework.transaction.annotation.Propagation;
+
+import javax.annotation.Nonnull;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Stream;
+
+public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorService, IHasScheduledJobs {
+ public static final String SCHEDULED_JOB_ID = ReductionStepExecutorScheduledJob.class.getName();
+ private static final Logger ourLog = LoggerFactory.getLogger(ReductionStepExecutorServiceImpl.class);
+ private final Map<String, JobWorkCursor<?, ?, ?>> myInstanceIdToJobWorkCursor = Collections.synchronizedMap(new LinkedHashMap<>());
+ private final ExecutorService myReducerExecutor;
+ private final IJobPersistence myJobPersistence;
+ private final IHapiTransactionService myTransactionService;
+ private final Semaphore myCurrentlyExecuting = new Semaphore(1);
+ private final AtomicReference myCurrentlyFinalizingInstanceId = new AtomicReference<>();
+ private Timer myHeartbeatTimer;
+ private final JobDefinitionRegistry myJobDefinitionRegistry;
+
+
+ /**
+ * Constructor
+ */
+ public ReductionStepExecutorServiceImpl(IJobPersistence theJobPersistence, IHapiTransactionService theTransactionService, JobDefinitionRegistry theJobDefinitionRegistry) {
+ myJobPersistence = theJobPersistence;
+ myTransactionService = theTransactionService;
+ myJobDefinitionRegistry = theJobDefinitionRegistry;
+
+ myReducerExecutor = Executors.newSingleThreadExecutor(new CustomizableThreadFactory("batch2-reducer"));
+ }
+
+
+ @EventListener(ContextRefreshedEvent.class)
+ public void start() {
+ myHeartbeatTimer = new Timer("batch2-reducer-heartbeat");
+ myHeartbeatTimer.schedule(new HeartbeatTimerTask(), DateUtils.MILLIS_PER_MINUTE, DateUtils.MILLIS_PER_MINUTE);
+ }
+
+ private void runHeartbeat() {
+ String currentlyFinalizingInstanceId = myCurrentlyFinalizingInstanceId.get();
+ if (currentlyFinalizingInstanceId != null) {
+ ourLog.info("Running heartbeat for instance: {}", currentlyFinalizingInstanceId);
+ executeInTransactionWithSynchronization(()->{
+ myJobPersistence.updateInstanceUpdateTime(currentlyFinalizingInstanceId);
+ return null;
+ });
+ }
+ }
+
+ @EventListener(ContextClosedEvent.class)
+ public void shutdown() {
+ myHeartbeatTimer.cancel();
+ }
+
+
+ @Override
+ public void triggerReductionStep(String theInstanceId, JobWorkCursor<?, ?, ?> theJobWorkCursor) {
+ myInstanceIdToJobWorkCursor.putIfAbsent(theInstanceId, theJobWorkCursor);
+ if (myCurrentlyExecuting.availablePermits() > 0) {
+ myReducerExecutor.submit(() -> reducerPass());
+ }
+ }
+
+ @Override
+ public void reducerPass() {
+ if (myCurrentlyExecuting.tryAcquire()) {
+ try {
+
+ String[] instanceIds = myInstanceIdToJobWorkCursor.keySet().toArray(new String[0]);
+ if (instanceIds.length > 0) {
+ String instanceId = instanceIds[0];
+ myCurrentlyFinalizingInstanceId.set(instanceId);
+ JobWorkCursor<?, ?, ?> jobWorkCursor = myInstanceIdToJobWorkCursor.get(instanceId);
+ executeReductionStep(instanceId, jobWorkCursor);
+
+ // If we get here, this succeeded. Purge the instance from the work queue
+ myInstanceIdToJobWorkCursor.remove(instanceId);
+ }
+
+ } finally {
+ myCurrentlyFinalizingInstanceId.set(null);
+ myCurrentlyExecuting.release();
+ }
+ }
+ }
+
+ @VisibleForTesting
+ <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> ReductionStepChunkProcessingResponse executeReductionStep(String theInstanceId, JobWorkCursor<PT, IT, OT> theJobWorkCursor) {
+
+ JobDefinitionStep<PT, IT, OT> step = theJobWorkCursor.getCurrentStep();
+
+ JobInstance instance = executeInTransactionWithSynchronization(() -> {
+ JobInstance currentInstance = myJobPersistence.fetchInstance(theInstanceId).orElseThrow(() -> new InternalErrorException("Unknown currentInstance: " + theInstanceId));
+ boolean shouldProceed = false;
+ switch (currentInstance.getStatus()) {
+ case IN_PROGRESS:
+ case ERRORED:
+ if (myJobPersistence.markInstanceAsStatus(currentInstance.getInstanceId(), StatusEnum.FINALIZE)) {
+ ourLog.info("Job instance {} has been set to FINALIZE state - Beginning reducer step", currentInstance.getInstanceId());
+ shouldProceed = true;
+ }
+ break;
+ case FINALIZE:
+ case COMPLETED:
+ case FAILED:
+ case QUEUED:
+ case CANCELLED:
+ break;
+ }
+
+ if (!shouldProceed) {
+ ourLog.warn(
+ "JobInstance[{}] should not be finalized at this time. In memory status is {}. Reduction step will not rerun!"
+ + " This could be a long running reduction job resulting in the processed msg not being acknowledge,"
+ + " or the result of a failed process or server restarting.",
+ currentInstance.getInstanceId(),
+ currentInstance.getStatus().name()
+ );
+ return null;
+ }
+
+ return currentInstance;
+ });
+ if (instance == null) {
+ return new ReductionStepChunkProcessingResponse(false);
+ }
+
+ PT parameters = instance.getParameters(theJobWorkCursor.getJobDefinition().getParametersType());
+ IReductionStepWorker<PT, IT, OT> reductionStepWorker = (IReductionStepWorker<PT, IT, OT>) step.getJobStepWorker();
+
+ instance.setStatus(StatusEnum.FINALIZE);
+
+ boolean defaultSuccessValue = true;
+ ReductionStepChunkProcessingResponse response = new ReductionStepChunkProcessingResponse(defaultSuccessValue);
+
+ try {
+ executeInTransactionWithSynchronization(() -> {
+ try (Stream<WorkChunk> chunkIterator = myJobPersistence.fetchAllWorkChunksForStepStream(instance.getInstanceId(), step.getStepId())) {
+ chunkIterator.forEach((chunk) -> {
+ processChunk(chunk, instance, parameters, reductionStepWorker, response, theJobWorkCursor);
+ });
+ }
+ return null;
+ });
+ } finally {
+
+ executeInTransactionWithSynchronization(() -> {
+ ourLog.info("Reduction step for instance[{}] produced {} successful and {} failed chunks", instance.getInstanceId(), response.getSuccessfulChunkIds().size(), response.getFailedChunksIds().size());
+
+ ReductionStepDataSink<PT, IT, OT> dataSink = new ReductionStepDataSink<>(instance.getInstanceId(), theJobWorkCursor, myJobPersistence, myJobDefinitionRegistry);
+ StepExecutionDetails<PT, IT> chunkDetails = new StepExecutionDetails<>(parameters, null, instance, "REDUCTION");
+
+ if (response.isSuccessful()) {
+ reductionStepWorker.run(chunkDetails, dataSink);
+ }
+
+ if (response.hasSuccessfulChunksIds()) {
+ // complete the steps without making a new work chunk
+ myJobPersistence.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(),
+ response.getSuccessfulChunkIds(),
+ StatusEnum.COMPLETED,
+ null // error message - none
+ );
+ }
+
+ if (response.hasFailedChunkIds()) {
+ // mark any failed chunks as failed for aborting
+ myJobPersistence.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(),
+ response.getFailedChunksIds(),
+ StatusEnum.FAILED,
+ "JOB ABORTED");
+ }
+ return null;
+ });
+
+ }
+
+ // if no successful chunks, return false
+ if (!response.hasSuccessfulChunksIds()) {
+ response.setSuccessful(false);
+ }
+
+ return response;
+ }
+
+ private <T> T executeInTransactionWithSynchronization(Callable<T> runnable) {
+ return myTransactionService
+ .withRequest(null)
+ .withPropagation(Propagation.REQUIRES_NEW)
+ .execute(runnable);
+ }
+
+
+ @Override
+ public void scheduleJobs(ISchedulerService theSchedulerService) {
+ theSchedulerService.scheduleClusteredJob(10 * DateUtils.MILLIS_PER_SECOND, buildJobDefinition());
+ }
+
+ @Nonnull
+ private ScheduledJobDefinition buildJobDefinition() {
+ ScheduledJobDefinition jobDefinition = new ScheduledJobDefinition();
+ jobDefinition.setId(SCHEDULED_JOB_ID);
+ jobDefinition.setJobClass(ReductionStepExecutorScheduledJob.class);
+ return jobDefinition;
+ }
+
+ private <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson>
+ void processChunk(WorkChunk theChunk,
+ JobInstance theInstance,
+ PT theParameters,
+ IReductionStepWorker<PT, IT, OT> theReductionStepWorker,
+ ReductionStepChunkProcessingResponse theResponseObject,
+ JobWorkCursor<PT, IT, OT> theJobWorkCursor) {
+
+ if (!theChunk.getStatus().isIncomplete()) {
+ // This should never happen since jobs with reduction are required to be gated
+ ourLog.error("Unexpected chunk {} with status {} found while reducing {}. No chunks feeding into a reduction step should be complete.", theChunk.getId(), theChunk.getStatus(), theInstance);
+ return;
+ }
+
+ if (theResponseObject.hasFailedChunkIds()) {
+ // we are going to fail all future chunks now
+ theResponseObject.addFailedChunkId(theChunk);
+ } else {
+ try {
+ // feed them into our reduction worker
+ // this is the most likely area to throw,
+ // as this is where db actions and processing is likely to happen
+ IT chunkData = theChunk.getData(theJobWorkCursor.getCurrentStep().getInputType());
+ ChunkExecutionDetails<PT, IT> chunkDetails = new ChunkExecutionDetails<>(chunkData, theParameters, theInstance.getInstanceId(), theChunk.getId());
+
+ ChunkOutcome outcome = theReductionStepWorker.consume(chunkDetails);
+
+ switch (outcome.getStatus()) {
+ case SUCCESS:
+ theResponseObject.addSuccessfulChunkId(theChunk);
+ break;
+
+ case FAILED:
+ ourLog.error("Processing of work chunk {} resulted in aborting job.", theChunk.getId());
+
+ // fail entire job - including all future workchunks
+ theResponseObject.addFailedChunkId(theChunk);
+ theResponseObject.setSuccessful(false);
+ break;
+ }
+ } catch (Exception e) {
+ String msg = String.format(
+ "Reduction step failed to execute chunk reduction for chunk %s with exception: %s.",
+ theChunk.getId(),
+ e.getMessage()
+ );
+ // we got a failure in a reduction
+ ourLog.error(msg, e);
+ theResponseObject.setSuccessful(false);
+
+ myJobPersistence.markWorkChunkAsFailed(theChunk.getId(), msg);
+ }
+ }
+ }
+
+ private class HeartbeatTimerTask extends TimerTask {
+ @Override
+ public void run() {
+ runHeartbeat();
+ }
+ }
+
+ public static class ReductionStepExecutorScheduledJob implements HapiJob {
+ @Autowired
+ private IReductionStepExecutorService myTarget;
+
+ @Override
+ public void execute(JobExecutionContext theContext) {
+ myTarget.reducerPass();
+ }
+ }
+
+
+}
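
The concurrency scheme above (a single-permit Semaphore, a one-thread executor, and a synchronized LinkedHashMap acting as the queue of pending instance IDs) amounts to a small reusable "single-flight" pattern: triggers never block, at most one pass runs at a time, and a key is removed from the queue only after it has been processed successfully. The generic sketch below only illustrates that pattern; the class and method names are invented and not part of HAPI:

import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.function.BiConsumer;

// Generic illustration of the trigger/pass pattern; not part of HAPI FHIR
class SingleFlightRunner<K, V> {
	private final Map<K, V> myQueue = Collections.synchronizedMap(new LinkedHashMap<>());
	private final Semaphore myRunning = new Semaphore(1);
	private final ExecutorService myExecutor = Executors.newSingleThreadExecutor();
	private final BiConsumer<K, V> myWorker;

	SingleFlightRunner(BiConsumer<K, V> theWorker) {
		myWorker = theWorker;
	}

	// Non-blocking: remember the key, and kick off a pass only if nothing is
	// running. A periodic scheduled call to pass() covers the race where a
	// trigger arrives just as a pass is finishing.
	void trigger(K theKey, V theValue) {
		myQueue.putIfAbsent(theKey, theValue);
		if (myRunning.availablePermits() > 0) {
			myExecutor.submit(this::pass);
		}
	}

	// At most one pass runs at a time; each pass handles the oldest queued key
	// and removes it only after the worker completed without throwing, so
	// failures are retried on a later pass.
	void pass() {
		if (!myRunning.tryAcquire()) {
			return;
		}
		try {
			K key = null;
			synchronized (myQueue) {
				Iterator<K> iterator = myQueue.keySet().iterator();
				if (iterator.hasNext()) {
					key = iterator.next();
				}
			}
			if (key != null) {
				myWorker.accept(key, myQueue.get(key));
				myQueue.remove(key);
			}
		} finally {
			myRunning.release();
		}
	}
}
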
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/StepExecutor.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/StepExecutor.java
index 433e2a256f3..9dd208be65b 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/StepExecutor.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/StepExecutor.java
@@ -108,7 +108,7 @@ public class StepExecutor {
int recordsProcessed = outcome.getRecordsProcessed();
int recoveredErrorCount = theDataSink.getRecoveredErrorCount();
- myJobPersistence.markWorkChunkAsCompletedAndClearData(chunkId, recordsProcessed);
+ myJobPersistence.markWorkChunkAsCompletedAndClearData(theStepExecutionDetails.getInstance().getInstanceId(), chunkId, recordsProcessed);
if (recoveredErrorCount > 0) {
myJobPersistence.incrementWorkChunkErrorCount(chunkId, recoveredErrorCount);
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/SynchronizedJobPersistenceWrapper.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/SynchronizedJobPersistenceWrapper.java
index 7bceba6629e..885826b92d5 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/SynchronizedJobPersistenceWrapper.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/SynchronizedJobPersistenceWrapper.java
@@ -120,8 +120,8 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
}
@Override
- public synchronized void markWorkChunkAsCompletedAndClearData(String theChunkId, int theRecordsProcessed) {
- myWrap.markWorkChunkAsCompletedAndClearData(theChunkId, theRecordsProcessed);
+ public synchronized void markWorkChunkAsCompletedAndClearData(String theInstanceId, String theChunkId, int theRecordsProcessed) {
+ myWrap.markWorkChunkAsCompletedAndClearData(theInstanceId, theChunkId, theRecordsProcessed);
}
@Override
@@ -170,8 +170,8 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
}
@Override
- public synchronized void deleteChunks(String theInstanceId) {
- myWrap.deleteChunks(theInstanceId);
+ public synchronized void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId) {
+ myWrap.deleteChunksAndMarkInstanceAsChunksPurged(theInstanceId);
}
@Override
@@ -193,4 +193,9 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
public List<String> fetchallchunkidsforstepWithStatus(String theInstanceId, String theStepId, StatusEnum theStatusEnum) {
return myWrap.fetchallchunkidsforstepWithStatus(theInstanceId, theStepId, theStatusEnum);
}
+
+ @Override
+ public synchronized void updateInstanceUpdateTime(String theInstanceId) {
+ myWrap.updateInstanceUpdateTime(theInstanceId);
+ }
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChannelMessageHandler.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChannelMessageHandler.java
index 4434023f5cb..d1ca08e7dca 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChannelMessageHandler.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChannelMessageHandler.java
@@ -32,11 +32,9 @@ import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
-import ca.uhn.fhir.batch2.util.Batch2Constants;
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
import ca.uhn.fhir.util.Logs;
import org.apache.commons.lang3.Validate;
-import javax.validation.constraints.NotNull;
import org.slf4j.Logger;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHandler;
@@ -44,9 +42,6 @@ import org.springframework.messaging.MessagingException;
import javax.annotation.Nonnull;
import java.util.Optional;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
/**
* This handler receives batch work request messages and performs the batch work requested by the message
@@ -65,8 +60,8 @@ class WorkChannelMessageHandler implements MessageHandler {
@Nonnull IJobMaintenanceService theJobMaintenanceService) {
myJobPersistence = theJobPersistence;
myJobDefinitionRegistry = theJobDefinitionRegistry;
- myJobStepExecutorFactory = new JobStepExecutorFactory(theJobPersistence, theBatchJobSender, theExecutorSvc, theJobMaintenanceService);
- myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
+ myJobStepExecutorFactory = new JobStepExecutorFactory(theJobPersistence, theBatchJobSender, theExecutorSvc, theJobMaintenanceService, theJobDefinitionRegistry);
+ myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
}
@Override
@@ -81,27 +76,19 @@ class WorkChannelMessageHandler implements MessageHandler {
String chunkId = workNotification.getChunkId();
Validate.notNull(chunkId);
- boolean isReductionWorkNotification = Batch2Constants.REDUCTION_STEP_CHUNK_ID_PLACEHOLDER.equals(chunkId);
-
JobWorkCursor<?, ?, ?> cursor = null;
WorkChunk workChunk = null;
- if (!isReductionWorkNotification) {
- Optional<WorkChunk> chunkOpt = myJobPersistence.fetchWorkChunkSetStartTimeAndMarkInProgress(chunkId);
- if (chunkOpt.isEmpty()) {
- ourLog.error("Unable to find chunk with ID {} - Aborting", chunkId);
- return;
- }
- workChunk = chunkOpt.get();
- ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", chunkId, workChunk.getTargetStepId(), workChunk.getStartTime());
-
- cursor = buildCursorFromNotification(workNotification);
-
- Validate.isTrue(workChunk.getTargetStepId().equals(cursor.getCurrentStepId()), "Chunk %s has target step %s but expected %s", chunkId, workChunk.getTargetStepId(), cursor.getCurrentStepId());
- } else {
- ourLog.debug("Processing reduction step work notification. No associated workchunks.");
-
- cursor = buildCursorFromNotification(workNotification);
+ Optional<WorkChunk> chunkOpt = myJobPersistence.fetchWorkChunkSetStartTimeAndMarkInProgress(chunkId);
+ if (chunkOpt.isEmpty()) {
+ ourLog.error("Unable to find chunk with ID {} - Aborting", chunkId);
+ return;
}
+ workChunk = chunkOpt.get();
+ ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", chunkId, workChunk.getTargetStepId(), workChunk.getStartTime());
+
+ cursor = buildCursorFromNotification(workNotification);
+
+ Validate.isTrue(workChunk.getTargetStepId().equals(cursor.getCurrentStepId()), "Chunk %s has target step %s but expected %s", chunkId, workChunk.getTargetStepId(), cursor.getCurrentStepId());
Optional<JobInstance> instanceOpt = myJobPersistence.fetchInstance(workNotification.getInstanceId());
JobInstance instance = instanceOpt.orElseThrow(() -> new InternalErrorException("Unknown instance: " + workNotification.getInstanceId()));
@@ -116,27 +103,7 @@ class WorkChannelMessageHandler implements MessageHandler {
}
JobStepExecutor<?,?,?> stepExecutor = myJobStepExecutorFactory.newJobStepExecutor(instance, workChunk, cursor);
- // TODO - ls
- /*
- * We should change this to actually have
- * the reduction step take in smaller sets of
- * lists of chunks from the previous steps (one
- * at a time still) and compose the
- * report gradually and in an idempotent way
- */
- if (isReductionWorkNotification) {
- // do async due to long running process
- // we'll fire off a separate thread and let the job continue
- ScheduledExecutorService exService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
- @Override
- public Thread newThread(@NotNull Runnable r) {
- return new Thread(r, "Reduction-step-thread");
- }
- });
- exService.execute(stepExecutor::executeStep);
- } else {
- stepExecutor.executeStep();
- }
+ stepExecutor.executeStep();
}
private void markInProgressIfQueued(JobInstance theInstance) {
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessor.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessor.java
index 4ba661a58fe..06014236cc1 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessor.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessor.java
@@ -22,8 +22,6 @@ package ca.uhn.fhir.batch2.coordinator;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.IJobStepWorker;
-import ca.uhn.fhir.batch2.api.IReductionStepWorker;
-import ca.uhn.fhir.batch2.api.ReductionStepExecutionDetails;
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
import ca.uhn.fhir.batch2.api.VoidModel;
import ca.uhn.fhir.batch2.channel.BatchJobSender;
@@ -32,11 +30,11 @@ import ca.uhn.fhir.batch2.model.JobDefinitionStep;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobWorkCursor;
import ca.uhn.fhir.batch2.model.WorkChunk;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.Logs;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
-import org.springframework.transaction.PlatformTransactionManager;
import javax.annotation.Nullable;
import java.util.Optional;
@@ -44,13 +42,10 @@ import java.util.Optional;
import static org.apache.commons.lang3.StringUtils.isBlank;
public class WorkChunkProcessor {
- private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
-
- // TODO
/**
* This retry only works if your channel producer supports
* retries on message processing exceptions.
- *
+ *
* What's more, we may one day want to have this configurable
* by the caller.
* But since this is not a feature of HAPI,
@@ -58,29 +53,28 @@ public class WorkChunkProcessor {
*/
public static final int MAX_CHUNK_ERROR_COUNT = 3;
+ private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
private final IJobPersistence myJobPersistence;
private final BatchJobSender myBatchJobSender;
private final StepExecutor myStepExecutor;
- private final ReductionStepExecutor myReductionStepExecutor;
public WorkChunkProcessor(IJobPersistence theJobPersistence,
BatchJobSender theSender,
- PlatformTransactionManager theTransactionManager) {
+ IHapiTransactionService theTransactionService) {
myJobPersistence = theJobPersistence;
myBatchJobSender = theSender;
myStepExecutor = new StepExecutor(theJobPersistence);
- myReductionStepExecutor = new ReductionStepExecutor(theJobPersistence, theTransactionManager);
}
/**
* Execute the work chunk.
*
- * @param theCursor - work cursor
- * @param theInstance - the job instance
+ * @param theCursor - work cursor
+ * @param theInstance - the job instance
* @param theWorkChunk - the work chunk (if available); can be null (for reduction step only!)
- * @param <PT> - Job parameters Type
- * @param <IT> - Step input parameters Type
- * @param <OT> - Step output parameters Type
+ * @param <PT> - Job parameters Type
+ * @param <IT> - Step input parameters Type
+ * @param <OT> - Step output parameters Type
* @return - JobStepExecution output. Contains the datasink and whether or not the execution had succeeded.
*/
public JobStepExecutorOutput
@@ -98,53 +92,37 @@ public class WorkChunkProcessor {
IJobStepWorker worker = step.getJobStepWorker();
BaseDataSink dataSink = getDataSink(theCursor, jobDefinition, instanceId);
- if (step.isReductionStep()) {
- // reduction step details
- boolean success = myReductionStepExecutor.executeReductionStep(theInstance, step, inputType, parameters);
+ assert !step.isReductionStep();
- if (success) {
- // Now call the normal step executor
- // the data sink stores the report on the instance (i.e. not chunks).
- // Assume the OT (report) data is smaller than the list of all IT data
-
- ReductionStepExecutionDetails reductionStepExecutionDetails = new ReductionStepExecutionDetails<>(parameters, theInstance);
- IReductionStepWorker reductionStepWorker = (IReductionStepWorker) step.getJobStepWorker();
-
- success = myStepExecutor.executeStep(reductionStepExecutionDetails, reductionStepWorker, dataSink);
- }
-
- return new JobStepExecutorOutput<>(success, dataSink);
- } else {
- // all other kinds of steps
- Validate.notNull(theWorkChunk);
- Optional> stepExecutionDetailsOpt = getExecutionDetailsForNonReductionStep(theWorkChunk, theInstance, inputType, parameters);
- if (!stepExecutionDetailsOpt.isPresent()) {
- return new JobStepExecutorOutput<>(false, dataSink);
- }
-
- StepExecutionDetails stepExecutionDetails = stepExecutionDetailsOpt.get();
-
- // execute the step
- boolean success = myStepExecutor.executeStep(stepExecutionDetails, worker, dataSink);
-
- // return results with data sink
- return new JobStepExecutorOutput<>(success, dataSink);
+ // all other kinds of steps
+ Validate.notNull(theWorkChunk);
+ Optional<StepExecutionDetails<PT, IT>> stepExecutionDetailsOpt = getExecutionDetailsForNonReductionStep(theWorkChunk, theInstance, inputType, parameters);
+ if (!stepExecutionDetailsOpt.isPresent()) {
+ return new JobStepExecutorOutput<>(false, dataSink);
}
+
+ StepExecutionDetails<PT, IT> stepExecutionDetails = stepExecutionDetailsOpt.get();
+
+ // execute the step
+ boolean success = myStepExecutor.executeStep(stepExecutionDetails, worker, dataSink);
+
+ // return results with data sink
+ return new JobStepExecutorOutput<>(success, dataSink);
}
/**
* Get the correct datasink for the cursor/job provided.
*/
@SuppressWarnings("unchecked")
- protected BaseDataSink getDataSink(
+ protected BaseDataSink getDataSink(
JobWorkCursor theCursor,
JobDefinition theJobDefinition,
String theInstanceId
) {
BaseDataSink dataSink;
- if (theCursor.isReductionStep()) {
- dataSink = new ReductionStepDataSink<>(theInstanceId, theCursor, theJobDefinition, myJobPersistence);
- } else if (theCursor.isFinalStep()) {
+
+ assert !theCursor.isReductionStep();
+ if (theCursor.isFinalStep()) {
dataSink = (BaseDataSink) new FinalStepDataSink<>(theJobDefinition.getJobDefinitionId(), theInstanceId, theCursor.asFinalCursor());
} else {
dataSink = new JobDataSink<>(myBatchJobSender, myJobPersistence, theJobDefinition, theInstanceId, theCursor);
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobInstanceProcessor.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobInstanceProcessor.java
index 5243f2473cd..9e26284ad42 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobInstanceProcessor.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobInstanceProcessor.java
@@ -21,15 +21,17 @@ package ca.uhn.fhir.batch2.maintenance;
*/
import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
import ca.uhn.fhir.batch2.channel.BatchJobSender;
-import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
+import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobWorkCursor;
import ca.uhn.fhir.batch2.model.JobWorkNotification;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
-import ca.uhn.fhir.batch2.util.Batch2Constants;
+import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.Logs;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
@@ -42,113 +44,121 @@ public class JobInstanceProcessor {
private final IJobPersistence myJobPersistence;
private final BatchJobSender myBatchJobSender;
-
- private final JobInstance myInstance;
private final JobChunkProgressAccumulator myProgressAccumulator;
private final JobInstanceProgressCalculator myJobInstanceProgressCalculator;
- private final WorkChunkProcessor myJobExecutorSvc;
private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
+ private final IReductionStepExecutorService myReductionStepExecutorService;
+ private final String myInstanceId;
+ private final JobDefinitionRegistry myJobDefinitionegistry;
JobInstanceProcessor(IJobPersistence theJobPersistence,
BatchJobSender theBatchJobSender,
- JobInstance theInstance,
+ String theInstanceId,
JobChunkProgressAccumulator theProgressAccumulator,
- WorkChunkProcessor theExecutorSvc
- ) {
+ IReductionStepExecutorService theReductionStepExecutorService,
+ JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
myBatchJobSender = theBatchJobSender;
- myInstance = theInstance;
- myJobExecutorSvc = theExecutorSvc;
+ myInstanceId = theInstanceId;
myProgressAccumulator = theProgressAccumulator;
- myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(theJobPersistence, theInstance, theProgressAccumulator);
- myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
+ myReductionStepExecutorService = theReductionStepExecutorService;
+ myJobDefinitionegistry = theJobDefinitionRegistry;
+ myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(theJobPersistence, theProgressAccumulator, theJobDefinitionRegistry);
+ myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
}
public void process() {
- handleCancellation();
- cleanupInstance();
- triggerGatedExecutions();
+ JobInstance theInstance = myJobPersistence.fetchInstance(myInstanceId).orElse(null);
+ if (theInstance == null) {
+ return;
+ }
+
+ handleCancellation(theInstance);
+ cleanupInstance(theInstance);
+ triggerGatedExecutions(theInstance);
}
- private void handleCancellation() {
- if (myInstance.isPendingCancellationRequest()) {
- myInstance.setErrorMessage(buildCancelledMessage());
- myJobInstanceStatusUpdater.setCancelled(myInstance);
+ private void handleCancellation(JobInstance theInstance) {
+ if (theInstance.isPendingCancellationRequest()) {
+ theInstance.setErrorMessage(buildCancelledMessage(theInstance));
+ myJobInstanceStatusUpdater.setCancelled(theInstance);
}
}
- private String buildCancelledMessage() {
+ private String buildCancelledMessage(JobInstance theInstance) {
String msg = "Job instance cancelled";
- if (myInstance.hasGatedStep()) {
- msg += " while running step " + myInstance.getCurrentGatedStepId();
+ if (theInstance.hasGatedStep()) {
+ msg += " while running step " + theInstance.getCurrentGatedStepId();
}
return msg;
}
- private void cleanupInstance() {
- switch (myInstance.getStatus()) {
+ private void cleanupInstance(JobInstance theInstance) {
+ switch (theInstance.getStatus()) {
case QUEUED:
+ // If we're still QUEUED, there are no stats to calculate
break;
+ case FINALIZE:
+ // If we're in FINALIZE, the reduction step is working so we should stay out of the way until it
+ // marks the job as COMPLETED
+ return;
case IN_PROGRESS:
case ERRORED:
- case FINALIZE:
- myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress();
+ myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress(theInstance);
break;
case COMPLETED:
case FAILED:
- case CANCELLED:
- if (purgeExpiredInstance()) {
+ if (purgeExpiredInstance(theInstance)) {
return;
}
break;
+ case CANCELLED:
+ purgeExpiredInstance(theInstance);
+ return;
}
- if (myInstance.isFinished() && !myInstance.isWorkChunksPurged()) {
- myInstance.setWorkChunksPurged(true);
- myJobPersistence.deleteChunks(myInstance.getInstanceId());
- myJobPersistence.updateInstance(myInstance);
+ if (theInstance.isFinished() && !theInstance.isWorkChunksPurged()) {
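+ // Recalculate progress from the chunk data one last time so the final totals are captured on the instance before the chunks are purged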
+ myJobInstanceProgressCalculator.calculateInstanceProgressAndPopulateInstance(theInstance);
+
+ theInstance.setWorkChunksPurged(true);
+ myJobPersistence.deleteChunksAndMarkInstanceAsChunksPurged(theInstance.getInstanceId());
+ myJobPersistence.updateInstance(theInstance);
}
}
- private boolean purgeExpiredInstance() {
- if (myInstance.getEndTime() != null) {
+ private boolean purgeExpiredInstance(JobInstance theInstance) {
+ if (theInstance.getEndTime() != null) {
long cutoff = System.currentTimeMillis() - PURGE_THRESHOLD;
- if (myInstance.getEndTime().getTime() < cutoff) {
- ourLog.info("Deleting old job instance {}", myInstance.getInstanceId());
- myJobPersistence.deleteInstanceAndChunks(myInstance.getInstanceId());
+ if (theInstance.getEndTime().getTime() < cutoff) {
+ ourLog.info("Deleting old job instance {}", theInstance.getInstanceId());
+ myJobPersistence.deleteInstanceAndChunks(theInstance.getInstanceId());
return true;
}
}
return false;
}
- private void triggerGatedExecutions() {
- if (!myInstance.isRunning()) {
+ private void triggerGatedExecutions(JobInstance theInstance) {
+ if (!theInstance.isRunning()) {
ourLog.debug("JobInstance {} is not in a \"running\" state. Status {}",
- myInstance.getInstanceId(), myInstance.getStatus().name());
+ theInstance.getInstanceId(), theInstance.getStatus().name());
return;
}
- if (!myInstance.hasGatedStep()) {
+ if (!theInstance.hasGatedStep()) {
return;
}
- JobWorkCursor<?, ?, ?> jobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(myInstance.getJobDefinition(), myInstance.getCurrentGatedStepId());
+ JobDefinition<? extends IModelJson> jobDefinition = myJobDefinitionRegistry.getJobDefinitionOrThrowException(theInstance);
+ JobWorkCursor<?, ?, ?> jobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(jobDefinition, theInstance.getCurrentGatedStepId());
// final step
if (jobWorkCursor.isFinalStep() && !jobWorkCursor.isReductionStep()) {
- ourLog.debug("Job instance {} is in final step and it's not a reducer step", myInstance.getInstanceId());
+ ourLog.debug("Job instance {} is in final step and it's not a reducer step", theInstance.getInstanceId());
return;
}
- // we should not be sending a second reduction step
- // to the queue if it's in finalize status
- if (jobWorkCursor.isReductionStep() && myInstance.getStatus() == StatusEnum.FINALIZE) {
- ourLog.warn("Job instance {} is still finalizing - a second reduction job will not be started.", myInstance.getInstanceId());
- return;
- }
-
- String instanceId = myInstance.getInstanceId();
+ String instanceId = theInstance.getInstanceId();
String currentStepId = jobWorkCursor.getCurrentStepId();
boolean shouldAdvance = myJobPersistence.canAdvanceInstanceToNextStep(instanceId, currentStepId);
if (shouldAdvance) {
@@ -156,10 +166,11 @@ public class JobInstanceProcessor {
ourLog.info("All processing is complete for gated execution of instance {} step {}. Proceeding to step {}", instanceId, currentStepId, nextStepId);
if (jobWorkCursor.nextStep.isReductionStep()) {
- processReductionStep(jobWorkCursor);
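+ // Reduction steps are now handed off to the dedicated reduction step executor service instead of being queued as a work notification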
+ JobWorkCursor<?, ?, ?> nextJobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(jobDefinition, jobWorkCursor.nextStep.getStepId());
+ myReductionStepExecutorService.triggerReductionStep(instanceId, nextJobWorkCursor);
} else {
// otherwise, continue processing as expected
- processChunksForNextSteps(instanceId, nextStepId);
+ processChunksForNextSteps(theInstance, nextStepId);
}
} else {
ourLog.debug("Not ready to advance gated execution of instance {} from step {} to {}.",
@@ -167,7 +178,8 @@ public class JobInstanceProcessor {
}
}
- private void processChunksForNextSteps(String instanceId, String nextStepId) {
+ private void processChunksForNextSteps(JobInstance theInstance, String nextStepId) {
+ String instanceId = theInstance.getInstanceId();
List<String> queuedChunksForNextStep = myProgressAccumulator.getChunkIdsWithStatus(instanceId, nextStepId, StatusEnum.QUEUED);
int totalChunksForNextStep = myProgressAccumulator.getTotalChunkCountForInstanceAndStep(instanceId, nextStepId);
if (totalChunksForNextStep != queuedChunksForNextStep.size()) {
@@ -175,21 +187,12 @@ public class JobInstanceProcessor {
}
List<String> chunksToSubmit = myJobPersistence.fetchallchunkidsforstepWithStatus(instanceId, nextStepId, StatusEnum.QUEUED);
for (String nextChunkId : chunksToSubmit) {
- JobWorkNotification workNotification = new JobWorkNotification(myInstance, nextStepId, nextChunkId);
+ JobWorkNotification workNotification = new JobWorkNotification(theInstance, nextStepId, nextChunkId);
myBatchJobSender.sendWorkChannelMessage(workNotification);
}
ourLog.debug("Submitted a batch of chunks for processing. [chunkCount={}, instanceId={}, stepId={}]", chunksToSubmit.size(), instanceId, nextStepId);
- myInstance.setCurrentGatedStepId(nextStepId);
- myJobPersistence.updateInstance(myInstance);
+ theInstance.setCurrentGatedStepId(nextStepId);
+ myJobPersistence.updateInstance(theInstance);
}
- private void processReductionStep(JobWorkCursor<?, ?, ?> theWorkCursor) {
- JobWorkNotification workNotification = new JobWorkNotification(
- myInstance,
- theWorkCursor.nextStep.getStepId(),
- Batch2Constants.REDUCTION_STEP_CHUNK_ID_PLACEHOLDER // chunk id; we don't need it
- );
- ourLog.debug("Submitting a Work Notification for a job reduction step. No associated work chunk ids are available.");
- myBatchJobSender.sendWorkChannelMessage(workNotification);
- }
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImpl.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImpl.java
index 26ec4eb8899..72224e13e11 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImpl.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImpl.java
@@ -22,6 +22,7 @@ package ca.uhn.fhir.batch2.maintenance;
import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
import ca.uhn.fhir.batch2.channel.BatchJobSender;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
@@ -95,6 +96,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
private long myScheduledJobFrequencyMillis = DateUtils.MILLIS_PER_MINUTE;
private Runnable myMaintenanceJobStartedCallback = () -> {};
private Runnable myMaintenanceJobFinishedCallback = () -> {};
+ private final IReductionStepExecutorService myReductionStepExecutorService;
/**
* Constructor
@@ -104,9 +106,10 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
DaoConfig theDaoConfig,
@Nonnull JobDefinitionRegistry theJobDefinitionRegistry,
@Nonnull BatchJobSender theBatchJobSender,
- @Nonnull WorkChunkProcessor theExecutor
- ) {
+ @Nonnull WorkChunkProcessor theExecutor,
+ @Nonnull IReductionStepExecutorService theReductionStepExecutorService) {
myDaoConfig = theDaoConfig;
+ myReductionStepExecutorService = theReductionStepExecutorService;
Validate.notNull(theSchedulerService);
Validate.notNull(theJobPersistence);
Validate.notNull(theJobDefinitionRegistry);
@@ -162,9 +165,10 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
try {
ourLog.debug("There is no clustered scheduling service. Requesting semaphore to run maintenance pass directly.");
// Some unit test, esp. the Loinc terminology tests, depend on this maintenance pass being run shortly after it is requested
- myRunMaintenanceSemaphore.tryAcquire(MAINTENANCE_TRIGGER_RUN_WITHOUT_SCHEDULER_TIMEOUT, TimeUnit.MINUTES);
- ourLog.debug("Semaphore acquired. Starting maintenance pass.");
- doMaintenancePass();
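+ // Only run the pass when the semaphore is actually acquired; previously the boolean result of tryAcquire was ignored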
+ if (myRunMaintenanceSemaphore.tryAcquire(MAINTENANCE_TRIGGER_RUN_WITHOUT_SCHEDULER_TIMEOUT, TimeUnit.MINUTES)) {
+ ourLog.debug("Semaphore acquired. Starting maintenance pass.");
+ doMaintenancePass();
+ }
return true;
} catch (InterruptedException e) {
throw new RuntimeException(Msg.code(2134) + "Timed out waiting to run a maintenance pass", e);
@@ -179,6 +183,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
return myRunMaintenanceSemaphore.getQueueLength();
}
+ @Override
@VisibleForTesting
public void forceMaintenancePass() {
// to simulate a long running job!
@@ -210,11 +215,12 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
List<JobInstance> instances = myJobPersistence.fetchInstances(INSTANCES_PER_PASS, page);
for (JobInstance instance : instances) {
- if (processedInstanceIds.add(instance.getInstanceId())) {
+ String instanceId = instance.getInstanceId();
+ if (processedInstanceIds.add(instanceId)) {
myJobDefinitionRegistry.setJobDefinition(instance);
JobInstanceProcessor jobInstanceProcessor = new JobInstanceProcessor(myJobPersistence,
- myBatchJobSender, instance, progressAccumulator, myJobExecutorSvc);
- ourLog.debug("Triggering maintenance process for instance {} in status {}", instance.getInstanceId(), instance.getStatus().name());
+ myBatchJobSender, instanceId, progressAccumulator, myReductionStepExecutorService, myJobDefinitionRegistry);
+ ourLog.debug("Triggering maintenance process for instance {} in status {}", instanceId, instance.getStatus().name());
jobInstanceProcessor.process();
}
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/ChunkOutcome.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/ChunkOutcome.java
index 20fd16c21f8..53ab5e4b31b 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/ChunkOutcome.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/ChunkOutcome.java
@@ -23,8 +23,7 @@ package ca.uhn.fhir.batch2.model;
public class ChunkOutcome {
public enum Status {
SUCCESS,
- FAIL,
- ABORT;
+ FAILED;
}
private final Status myStatus;
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/JobInstance.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/JobInstance.java
index c1752db433a..7df25731292 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/JobInstance.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/JobInstance.java
@@ -104,9 +104,6 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
@JsonProperty(value = "report", access = JsonProperty.Access.READ_WRITE)
private String myReport;
- @JsonIgnore
- private JobDefinition<?> myJobDefinition;
-
/**
* Constructor
*/
@@ -138,7 +135,6 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
setWorkChunksPurged(theJobInstance.isWorkChunksPurged());
setCurrentGatedStepId(theJobInstance.getCurrentGatedStepId());
setReport(theJobInstance.getReport());
- myJobDefinition = theJobInstance.getJobDefinition();
}
public void setUpdateTime(Date theUpdateTime) {
@@ -308,16 +304,10 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
public void setJobDefinition(JobDefinition<?> theJobDefinition) {
- myJobDefinition = theJobDefinition;
setJobDefinitionId(theJobDefinition.getJobDefinitionId());
setJobDefinitionVersion(theJobDefinition.getJobDefinitionVersion());
}
- @Override
- public JobDefinition<?> getJobDefinition() {
- return myJobDefinition;
- }
-
@Override
public boolean isCancelled() {
return myCancelled;
@@ -354,7 +344,7 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
.append("errorMessage", myErrorMessage)
.append("errorCount", myErrorCount)
.append("estimatedTimeRemaining", myEstimatedTimeRemaining)
- .append("record", myReport)
+ .append("report", myReport)
.toString();
}
@@ -373,6 +363,11 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
case IN_PROGRESS:
case FINALIZE:
return true;
+ case COMPLETED:
+ case ERRORED:
+ case QUEUED:
+ case FAILED:
+ case CANCELLED:
default:
Logs.getBatchTroubleshootingLog().debug("Status {} is considered \"not running\"", getStatus().name());
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/StatusEnum.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/StatusEnum.java
index 1b6042bd16f..759da90f02b 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/StatusEnum.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/StatusEnum.java
@@ -189,6 +189,10 @@ public enum StatusEnum {
return myIncomplete;
}
+ public boolean isEnded() {
+ return myEnded;
+ }
+
public boolean isCancellable() {
return myIsCancellable;
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/WorkChunk.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/WorkChunk.java
index cded53a7241..452d9ff30b3 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/WorkChunk.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/model/WorkChunk.java
@@ -28,9 +28,12 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.apache.commons.lang3.Validate;
+import org.apache.commons.lang3.builder.ToStringBuilder;
import java.util.Date;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
public class WorkChunk implements IModelJson {
@JsonProperty("id")
@@ -229,11 +232,36 @@ public class WorkChunk implements IModelJson {
return this;
}
+ public Date getUpdateTime() {
+ return myUpdateTime;
+ }
+
public void setUpdateTime(Date theUpdateTime) {
myUpdateTime = theUpdateTime;
}
- public Date getUpdateTime() {
- return myUpdateTime;
+ @Override
+ public String toString() {
+ ToStringBuilder b = new ToStringBuilder(this);
+ b.append("Id", myId);
+ b.append("Sequence", mySequence);
+ b.append("Status", myStatus);
+ b.append("JobDefinitionId", myJobDefinitionId);
+ b.append("JobDefinitionVersion", myJobDefinitionVersion);
+ b.append("TargetStepId", myTargetStepId);
+ b.append("InstanceId", myInstanceId);
+ b.append("Data", isNotBlank(myData) ? "(present)" : "(absent)");
+ b.append("CreateTime", myCreateTime);
+ b.append("StartTime", myStartTime);
+ b.append("EndTime", myEndTime);
+ b.append("UpdateTime", myUpdateTime);
+ b.append("RecordsProcessed", myRecordsProcessed);
+ if (isNotBlank(myErrorMessage)) {
+ b.append("ErrorMessage", myErrorMessage);
+ }
+ if (myErrorCount > 0) {
+ b.append("ErrorCount", myErrorCount);
+ }
+ return b.toString();
}
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceProgressCalculator.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceProgressCalculator.java
index 79fdccac419..927dc587526 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceProgressCalculator.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceProgressCalculator.java
@@ -21,6 +21,7 @@ package ca.uhn.fhir.batch2.progress;
*/
import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
@@ -28,57 +29,70 @@ import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.util.Logs;
import org.slf4j.Logger;
+import javax.annotation.Nonnull;
import java.util.Iterator;
public class JobInstanceProgressCalculator {
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
private final IJobPersistence myJobPersistence;
- private final JobInstance myInstance;
private final JobChunkProgressAccumulator myProgressAccumulator;
private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
- public JobInstanceProgressCalculator(IJobPersistence theJobPersistence, JobInstance theInstance, JobChunkProgressAccumulator theProgressAccumulator) {
+ public JobInstanceProgressCalculator(IJobPersistence theJobPersistence, JobChunkProgressAccumulator theProgressAccumulator, JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
- myInstance = theInstance;
myProgressAccumulator = theProgressAccumulator;
- myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
+ myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
}
- public void calculateAndStoreInstanceProgress() {
- InstanceProgress instanceProgress = new InstanceProgress();
+ public void calculateAndStoreInstanceProgress(JobInstance theInstance) {
+ String instanceId = theInstance.getInstanceId();
- Iterator<WorkChunk> workChunkIterator = myJobPersistence.fetchAllWorkChunksIterator(myInstance.getInstanceId(), false);
+ InstanceProgress instanceProgress = calculateInstanceProgress(instanceId);
+
+ if (instanceProgress.failed()) {
+ myJobInstanceStatusUpdater.setFailed(theInstance);
+ }
+
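+ // Apply the calculated progress to a freshly fetched copy of the instance so the update is based on the latest persisted state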
+ JobInstance currentInstance = myJobPersistence.fetchInstance(instanceId).orElse(null);
+ if (currentInstance != null) {
+ instanceProgress.updateInstance(currentInstance);
+
+ if (instanceProgress.changed() || currentInstance.getStatus() == StatusEnum.IN_PROGRESS) {
+ if (currentInstance.getCombinedRecordsProcessed() > 0) {
+ ourLog.info("Job {} of type {} has status {} - {} records processed ({}/sec) - ETA: {}", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed(), currentInstance.getCombinedRecordsProcessedPerSecond(), currentInstance.getEstimatedTimeRemaining());
+ ourLog.debug(instanceProgress.toString());
+ } else {
+ ourLog.info("Job {} of type {} has status {} - {} records processed", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed());
+ ourLog.debug(instanceProgress.toString());
+ }
+ }
+
+ if (instanceProgress.changed()) {
+ if (instanceProgress.hasNewStatus()) {
+ myJobInstanceStatusUpdater.updateInstanceStatus(currentInstance, instanceProgress.getNewStatus());
+ } else {
+ myJobPersistence.updateInstance(currentInstance);
+ }
+ }
+
+ }
+ }
+
+ @Nonnull
+ private InstanceProgress calculateInstanceProgress(String instanceId) {
+ InstanceProgress instanceProgress = new InstanceProgress();
+ Iterator<WorkChunk> workChunkIterator = myJobPersistence.fetchAllWorkChunksIterator(instanceId, false);
while (workChunkIterator.hasNext()) {
WorkChunk next = workChunkIterator.next();
myProgressAccumulator.addChunk(next);
instanceProgress.addChunk(next);
}
+ return instanceProgress;
+ }
- instanceProgress.updateInstance(myInstance);
-
- if (instanceProgress.failed()) {
- myJobInstanceStatusUpdater.setFailed(myInstance);
- return;
- }
-
-
- if (instanceProgress.changed() || myInstance.getStatus() == StatusEnum.IN_PROGRESS) {
- if (myInstance.getCombinedRecordsProcessed() > 0) {
- ourLog.info("Job {} of type {} has status {} - {} records processed ({}/sec) - ETA: {}", myInstance.getInstanceId(), myInstance.getJobDefinitionId(), myInstance.getStatus(), myInstance.getCombinedRecordsProcessed(), myInstance.getCombinedRecordsProcessedPerSecond(), myInstance.getEstimatedTimeRemaining());
- ourLog.debug(instanceProgress.toString());
- } else {
- ourLog.info("Job {} of type {} has status {} - {} records processed", myInstance.getInstanceId(), myInstance.getJobDefinitionId(), myInstance.getStatus(), myInstance.getCombinedRecordsProcessed());
- ourLog.debug(instanceProgress.toString());
- }
- }
-
- if (instanceProgress.changed()) {
- if (instanceProgress.hasNewStatus()) {
- myJobInstanceStatusUpdater.updateInstanceStatus(myInstance, instanceProgress.getNewStatus());
- } else {
- myJobPersistence.updateInstance(myInstance);
- }
- }
+ public void calculateInstanceProgressAndPopulateInstance(JobInstance theInstance) {
+ InstanceProgress progress = calculateInstanceProgress(theInstance.getInstanceId());
+ progress.updateInstance(theInstance);
}
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdater.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdater.java
index b1765b048bf..17f1ba76a66 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdater.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdater.java
@@ -23,6 +23,7 @@ package ca.uhn.fhir.batch2.progress;
import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
@@ -35,9 +36,11 @@ import java.util.Optional;
public class JobInstanceStatusUpdater {
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
private final IJobPersistence myJobPersistence;
+ private final JobDefinitionRegistry myJobDefinitionRegistry;
- public JobInstanceStatusUpdater(IJobPersistence theJobPersistence) {
+ public JobInstanceStatusUpdater(IJobPersistence theJobPersistence, JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
+ myJobDefinitionRegistry = theJobDefinitionRegistry;
}
public boolean updateInstanceStatus(JobInstance theJobInstance, StatusEnum theNewStatus) {
@@ -76,13 +79,15 @@ public class JobInstanceStatusUpdater {
// the status change happened in.
if (statusChanged) {
ourLog.info("Changing job instance {} of type {} from {} to {}", theJobInstance.getInstanceId(), theJobInstance.getJobDefinitionId(), origStatus, theJobInstance.getStatus());
- handleStatusChange(origStatus, theJobInstance);
+ handleStatusChange(theJobInstance);
}
return statusChanged;
}
- private void handleStatusChange(StatusEnum theOrigStatus, JobInstance theJobInstance) {
- JobDefinition definition = (JobDefinition) theJobInstance.getJobDefinition();
+ private void handleStatusChange(JobInstance theJobInstance) {
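+ // JobInstance no longer carries its JobDefinition, so resolve the definition from the registry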
+ JobDefinition definition = myJobDefinitionRegistry.getJobDefinitionOrThrowException(theJobInstance);
+ assert definition != null;
+
switch (theJobInstance.getStatus()) {
case COMPLETED:
invokeCompletionHandler(theJobInstance, definition, definition.getCompletionHandler());
@@ -94,6 +99,7 @@ public class JobInstanceStatusUpdater {
case ERRORED:
case QUEUED:
case IN_PROGRESS:
+ case FINALIZE:
default:
// do nothing
}
diff --git a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/util/Batch2Constants.java b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/util/Batch2Constants.java
index 8855c5dee24..fb8f85708a8 100644
--- a/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/util/Batch2Constants.java
+++ b/hapi-fhir-storage-batch2/src/main/java/ca/uhn/fhir/batch2/util/Batch2Constants.java
@@ -30,10 +30,4 @@ public class Batch2Constants {
* date when performing operations that pull resources by time windows.
*/
public static final Date BATCH_START_DATE = new InstantType("2000-01-01T00:00:00Z").getValue();
-
- /**
- * This is a placeholder chunkid for the reduction step to allow it to be
- * used in the message handling
- */
- public static final String REDUCTION_STEP_CHUNK_ID_PLACEHOLDER = "REDUCTION";
}
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImplTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImplTest.java
index d5a344ddf36..0a0db91bc2e 100644
--- a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImplTest.java
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/JobCoordinatorImplTest.java
@@ -19,6 +19,8 @@ import ca.uhn.fhir.batch2.model.MarkWorkChunkAsErrorRequest;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
+import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
import ca.uhn.fhir.jpa.subscription.channel.impl.LinkedBlockingChannel;
import ca.uhn.fhir.model.api.IModelJson;
@@ -36,7 +38,6 @@ import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.stubbing.Answer;
import org.springframework.messaging.MessageDeliveryException;
-import org.springframework.transaction.PlatformTransactionManager;
import javax.annotation.Nonnull;
import java.util.Arrays;
@@ -69,8 +70,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
private JobDefinitionRegistry myJobDefinitionRegistry;
@Mock
private IJobMaintenanceService myJobMaintenanceService;
- @Mock
- private PlatformTransactionManager myPlatformTransactionManager;
+ private IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();
@Captor
private ArgumentCaptor> myStep1ExecutionDetailsCaptor;
@@ -89,7 +89,8 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
public void beforeEach() {
// The code refactored to keep the same functionality,
// but in this service (so it's a real service here!)
- WorkChunkProcessor jobStepExecutorSvc = new WorkChunkProcessor(myJobInstancePersister, myBatchJobSender, myPlatformTransactionManager);
+ WorkChunkProcessor jobStepExecutorSvc = new WorkChunkProcessor(myJobInstancePersister, myBatchJobSender, myTransactionService);
+ JobStepExecutorFactory jobStepExecutorFactory = new JobStepExecutorFactory(myJobInstancePersister, myBatchJobSender, jobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
mySvc = new JobCoordinatorImpl(myBatchJobSender, myWorkChannelReceiver, myJobInstancePersister, myJobDefinitionRegistry, jobStepExecutorSvc, myJobMaintenanceService);
}
@@ -138,7 +139,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(any(), eq(50));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), any(), eq(50));
verify(myJobInstancePersister, times(0)).fetchWorkChunksWithoutData(any(), anyInt(), anyInt());
verify(myBatchJobSender, times(2)).sendWorkChannelMessage(any());
}
@@ -256,7 +257,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(any(), eq(50));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), any(), eq(50));
verify(myBatchJobSender, times(0)).sendWorkChannelMessage(any());
}
@@ -283,7 +284,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}
@Test
@@ -321,7 +322,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(CHUNK_ID, capturedParams.getChunkId());
assertEquals("This is an error message", capturedParams.getErrorMsg());
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(0));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(0));
}
@@ -354,7 +355,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PASSWORD_VALUE, params.getPassword());
verify(myJobInstancePersister, times(1)).incrementWorkChunkErrorCount(eq(CHUNK_ID), eq(2));
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}
@Test
@@ -380,7 +381,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());
- verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
+ verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}
@SuppressWarnings("unchecked")
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSinkTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSinkTest.java
index aee2acee622..ea86e8bec60 100644
--- a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSinkTest.java
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepDataSinkTest.java
@@ -21,11 +21,14 @@ import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
+import java.util.Collections;
import java.util.Optional;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
@@ -35,6 +38,8 @@ import static org.mockito.Mockito.when;
public class ReductionStepDataSinkTest {
private static final String INSTANCE_ID = "instanceId";
+ @Mock
+ private JobDefinitionRegistry myJobDefinitionRegistry;
private static class TestJobParameters implements IModelJson { }
@@ -75,9 +80,8 @@ public class ReductionStepDataSinkTest {
myDataSink = new ReductionStepDataSink<>(
INSTANCE_ID,
myWorkCursor,
- myJobDefinition,
- myJobPersistence
- );
+ myJobPersistence,
+ myJobDefinitionRegistry);
ourLogger = (Logger) Logs.getBatchTroubleshootingLog();
ourLogger.addAppender(myListAppender);
}
@@ -92,6 +96,7 @@ public class ReductionStepDataSinkTest {
// when
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
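+ // The reworked data sink now walks the instance's work chunks, so the persistence mock must supply an iterator (empty is fine for this test)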
+ when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());
// test
myDataSink.accept(chunkData);
@@ -117,6 +122,7 @@ public class ReductionStepDataSinkTest {
// when
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
+ when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());
// test
myDataSink.accept(firstData);
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImplTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImplTest.java
new file mode 100644
index 00000000000..dc09c54851e
--- /dev/null
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/ReductionStepExecutorServiceImplTest.java
@@ -0,0 +1,246 @@
+package ca.uhn.fhir.batch2.coordinator;
+
+import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
+import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IJobStepWorker;
+import ca.uhn.fhir.batch2.api.IReductionStepWorker;
+import ca.uhn.fhir.batch2.api.RunOutcome;
+import ca.uhn.fhir.batch2.api.StepExecutionDetails;
+import ca.uhn.fhir.batch2.api.VoidModel;
+import ca.uhn.fhir.batch2.model.ChunkOutcome;
+import ca.uhn.fhir.batch2.model.JobDefinition;
+import ca.uhn.fhir.batch2.model.JobDefinitionStep;
+import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.JobWorkCursor;
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunk;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
+import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.INSTANCE_ID;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.JOB_DEFINITION_ID;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.REDUCTION_STEP_ID;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.StepInputData;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.StepOutputData;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.TestJobParameters;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.createWorkChunk;
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.getTestJobInstance;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+@ExtendWith(MockitoExtension.class)
+public class ReductionStepExecutorServiceImplTest {
+
+ private final IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();
+ @Mock
+ private IJobPersistence myJobPersistence;
+ @Mock
+ private IReductionStepWorker myReductionStepWorker;
+ // @Mock
+// private JobDefinitionStep myPreviousStep;
+// @Mock
+// private JobDefinitionStep myCurrentStep;
+ private ReductionStepExecutorServiceImpl mySvc;
+ private JobDefinitionRegistry myJobDefinitionRegistry = new JobDefinitionRegistry();
+
+ @BeforeEach
+ public void before() {
+ mySvc = new ReductionStepExecutorServiceImpl(myJobPersistence, myTransactionService, myJobDefinitionRegistry);
+ }
+
+
+ @Test
+ public void doExecution_reductionWithChunkFailed_marksAllFutureChunksAsFailedButPreviousAsSuccess() {
+ // setup
+ List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
+ List<WorkChunk> chunks = new ArrayList<>();
+ for (String id : chunkIds) {
+ chunks.add(createWorkChunk(id));
+ }
+ JobInstance jobInstance = getTestJobInstance();
+ jobInstance.setStatus(StatusEnum.IN_PROGRESS);
+ JobWorkCursor workCursor = mock(JobWorkCursor.class);
+
+
+ // when
+ when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep) createJobDefinition().getSteps().get(1));
+ when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
+ when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
+ when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
+ .thenReturn(chunks.stream());
+ when(myReductionStepWorker.consume(any(ChunkExecutionDetails.class)))
+ .thenReturn(ChunkOutcome.SUCCESS())
+ .thenReturn(new ChunkOutcome(ChunkOutcome.Status.FAILED));
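+ // first chunk consumes successfully, the second reports FAILED, so the reduction aborts partway through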
+
+ // test
+ ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
+
+ // verification
+ assertFalse(result.isSuccessful());
+ ArgumentCaptor submittedListIds = ArgumentCaptor.forClass(List.class);
+ ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(StatusEnum.class);
+ verify(myJobPersistence, times(chunkIds.size()))
+ .markWorkChunksWithStatusAndWipeData(
+ eq(INSTANCE_ID),
+ submittedListIds.capture(),
+ statusCaptor.capture(),
+ any()
+ );
+ assertEquals(2, submittedListIds.getAllValues().size());
+ List list1 = submittedListIds.getAllValues().get(0);
+ List list2 = submittedListIds.getAllValues().get(1);
+ assertTrue(list1.contains(chunkIds.get(0)));
+ assertTrue(list2.contains(chunkIds.get(1)));
+
+ // assumes the order in which the statuses are recorded:
+ // successes first, then failures
+ assertEquals(2, statusCaptor.getAllValues().size());
+ List statuses = statusCaptor.getAllValues();
+ assertEquals(StatusEnum.COMPLETED, statuses.get(0));
+ assertEquals(StatusEnum.FAILED, statuses.get(1));
+ }
+
+
+ @Test
+ public void doExecution_reductionStepWithValidInput_executesAsExpected() {
+ // setup
+ List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
+ List<WorkChunk> chunks = new ArrayList<>();
+ for (String id : chunkIds) {
+ chunks.add(createWorkChunk(id));
+ }
+ JobInstance jobInstance = getTestJobInstance();
+ jobInstance.setStatus(StatusEnum.IN_PROGRESS);
+ JobWorkCursor workCursor = mock(JobWorkCursor.class);
+
+
+ // when
+ when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep) createJobDefinition().getSteps().get(1));
+ when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
+ when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
+ when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
+ .thenReturn(chunks.stream());
+ when(myReductionStepWorker.consume(any(ChunkExecutionDetails.class)))
+ .thenReturn(ChunkOutcome.SUCCESS());
+ when(myReductionStepWorker.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
+ .thenReturn(RunOutcome.SUCCESS);
+
+ // test
+ ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
+
+ // verify
+ ArgumentCaptor chunkCaptor = ArgumentCaptor.forClass(ChunkExecutionDetails.class);
+ verify(myReductionStepWorker, times(chunks.size()))
+ .consume(chunkCaptor.capture());
+ List chunksSubmitted = chunkCaptor.getAllValues();
+ assertEquals(chunks.size(), chunksSubmitted.size());
+ for (ChunkExecutionDetails submitted : chunksSubmitted) {
+ assertTrue(chunkIds.contains(submitted.getChunkId()));
+ }
+
+ assertTrue(result.isSuccessful());
+ ArgumentCaptor<List<String>> chunkIdCaptor = ArgumentCaptor.forClass(List.class);
+ verify(myJobPersistence).markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
+ chunkIdCaptor.capture(), eq(StatusEnum.COMPLETED), eq(null));
+ List<String> capturedIds = chunkIdCaptor.getValue();
+ assertEquals(chunkIds.size(), capturedIds.size());
+ for (String chunkId : chunkIds) {
+ assertTrue(capturedIds.contains(chunkId));
+ }
+
+ }
+
+
+ @Test
+ public void doExecution_reductionStepWithErrors_returnsFalseAndMarksPreviousChunksFailed() {
+ // setup
+ List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
+ List<WorkChunk> chunks = new ArrayList<>();
+ for (String id : chunkIds) {
+ chunks.add(createWorkChunk(id));
+ }
+ JobInstance jobInstance = getTestJobInstance();
+ jobInstance.setStatus(StatusEnum.IN_PROGRESS);
+ JobWorkCursor workCursor = mock(JobWorkCursor.class);
+
+ // when
+ when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep) createJobDefinition().getSteps().get(1));
+ when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
+ when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID))).thenReturn(chunks.stream());
+ when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
+ doThrow(new RuntimeException("This is an error")).when(myReductionStepWorker).consume(any(ChunkExecutionDetails.class));
+
+ // test
+ ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
+
+ // verify
+ assertFalse(result.isSuccessful());
+ ArgumentCaptor chunkIdCaptor = ArgumentCaptor.forClass(String.class);
+ ArgumentCaptor errorCaptor = ArgumentCaptor.forClass(String.class);
+ verify(myJobPersistence, times(chunkIds.size()))
+ .markWorkChunkAsFailed(chunkIdCaptor.capture(), errorCaptor.capture());
+ List chunkIdsCaptured = chunkIdCaptor.getAllValues();
+ List errorsCaptured = errorCaptor.getAllValues();
+ for (int i = 0; i < chunkIds.size(); i++) {
+ String cId = chunkIdsCaptured.get(i);
+ String error = errorsCaptured.get(i);
+
+ assertTrue(chunkIds.contains(cId));
+ assertTrue(error.contains("Reduction step failed to execute chunk reduction for chunk"));
+ }
+ verify(myJobPersistence, never())
+ .markWorkChunksWithStatusAndWipeData(anyString(), anyList(), any(), anyString());
+ verify(myReductionStepWorker, never())
+ .run(any(), any());
+ }
+
+ @SuppressWarnings("unchecked")
+ private JobDefinition createJobDefinition() {
+ return JobDefinition.newBuilder()
+ .setJobDefinitionId(JOB_DEFINITION_ID)
+ .setJobDescription("Reduction job description")
+ .setJobDefinitionVersion(1)
+ .gatedExecution()
+ .setParametersType(TestJobParameters.class)
+ .addFirstStep(
+ "step 1",
+ "description 1",
+ VoidModel.class,
+ mock(IJobStepWorker.class) // we don't care about this step - we just need it
+ )
+ .addFinalReducerStep(
+ REDUCTION_STEP_ID,
+ "description 2",
+ StepOutputData.class,
+ myReductionStepWorker
+ )
+ .build();
+ }
+
+
+}
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessorTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessorTest.java
index 78c66f88666..b649babc692 100644
--- a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessorTest.java
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/coordinator/WorkChunkProcessorTest.java
@@ -23,6 +23,9 @@ import ca.uhn.fhir.batch2.model.MarkWorkChunkAsErrorRequest;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkData;
+import ca.uhn.fhir.jpa.dao.tx.HapiTransactionService;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
+import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.JsonUtil;
import org.junit.jupiter.api.BeforeEach;
@@ -60,8 +63,8 @@ import static org.mockito.Mockito.when;
@SuppressWarnings({"unchecked", "rawtypes"})
@ExtendWith(MockitoExtension.class)
public class WorkChunkProcessorTest {
- private static final String INSTANCE_ID = "instanceId";
- private static final String JOB_DEFINITION_ID = "jobDefId";
+ static final String INSTANCE_ID = "instanceId";
+ static final String JOB_DEFINITION_ID = "jobDefId";
public static final String REDUCTION_STEP_ID = "step last";
// static internal use classes
@@ -72,11 +75,11 @@ public class WorkChunkProcessorTest {
FINAL
}
- private static class TestJobParameters implements IModelJson { }
+ static class TestJobParameters implements IModelJson { }
- private static class StepInputData implements IModelJson { }
+ static class StepInputData implements IModelJson { }
- private static class StepOutputData implements IModelJson { }
+ static class StepOutputData implements IModelJson { }
private static class TestDataSink extends BaseDataSink {
@@ -105,8 +108,8 @@ public class WorkChunkProcessorTest {
// our test class
private class TestWorkChunkProcessor extends WorkChunkProcessor {
- public TestWorkChunkProcessor(IJobPersistence thePersistence, BatchJobSender theSender, PlatformTransactionManager theTransactionManager) {
- super(thePersistence, theSender, theTransactionManager);
+ public TestWorkChunkProcessor(IJobPersistence thePersistence, BatchJobSender theSender, IHapiTransactionService theHapiTransactionService) {
+ super(thePersistence, theSender, theHapiTransactionService);
}
@Override
@@ -139,8 +142,7 @@ public class WorkChunkProcessorTest {
@Mock
private BatchJobSender myJobSender;
- @Mock
- private PlatformTransactionManager myMockTransactionManager;
+ private IHapiTransactionService myMockTransactionManager = new NonTransactionalHapiTransactionService();
private TestWorkChunkProcessor myExecutorSvc;
@@ -186,218 +188,6 @@ public class WorkChunkProcessorTest {
return step;
}
- @Test
- public void doExecution_reductionStepWithValidInput_executesAsExpected() {
- // setup
- List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
- List<WorkChunk> chunks = new ArrayList<>();
- for (String id : chunkIds) {
- chunks.add(createWorkChunk(id));
- }
- JobInstance jobInstance = getTestJobInstance();
- JobWorkCursor workCursor = mock(JobWorkCursor.class);
- JobDefinitionStep step = mockOutWorkCursor(StepType.REDUCTION, workCursor, true, false);
-
- // when
- when(workCursor.isReductionStep())
- .thenReturn(true);
- when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
- .thenReturn(chunks.stream());
- when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
- when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
- .thenReturn(ChunkOutcome.SUCCESS());
- when(myReductionStep.run(
- any(StepExecutionDetails.class), any(IJobDataSink.class)
- )).thenReturn(RunOutcome.SUCCESS);
-
- // test
- JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
- workCursor,
- jobInstance,
- null
- );
-
- // verify
- ArgumentCaptor chunkCaptor = ArgumentCaptor.forClass(ChunkExecutionDetails.class);
- verify(myReductionStep, times(chunks.size()))
- .consume(chunkCaptor.capture());
- List chunksSubmitted = chunkCaptor.getAllValues();
- assertEquals(chunks.size(), chunksSubmitted.size());
- for (ChunkExecutionDetails submitted : chunksSubmitted) {
- assertTrue(chunkIds.contains(submitted.getChunkId()));
- }
-
- assertTrue(result.isSuccessful());
- assertTrue(myDataSink.myActualDataSink instanceof ReductionStepDataSink);
- ArgumentCaptor executionDetsCaptor = ArgumentCaptor.forClass(StepExecutionDetails.class);
- verify(myReductionStep).run(executionDetsCaptor.capture(), eq(myDataSink));
- assertTrue(executionDetsCaptor.getValue() instanceof ReductionStepExecutionDetails);
- ArgumentCaptor<List<String>> chunkIdCaptor = ArgumentCaptor.forClass(List.class);
- verify(myJobPersistence).markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
- chunkIdCaptor.capture(), eq(StatusEnum.COMPLETED), eq(null));
- List<String> capturedIds = chunkIdCaptor.getValue();
- assertEquals(chunkIds.size(), capturedIds.size());
- for (String chunkId : chunkIds) {
- assertTrue(capturedIds.contains(chunkId));
- }
-
- // nevers
- verifyNoErrors(0);
- verify(myNonReductionStep, never()).run(any(), any());
- verify(myLastStep, never()).run(any(), any());
- }
-
- @Test
- public void doExecution_reductionStepWithErrors_returnsFalseAndMarksPreviousChunksFailed() {
- // setup
- String errorMsg = "Exceptional!";
- List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
- List<WorkChunk> chunks = new ArrayList<>();
- for (String id : chunkIds) {
- chunks.add(createWorkChunk(id));
- }
- JobInstance jobInstance = getTestJobInstance();
- JobWorkCursor workCursor = mock(JobWorkCursor.class);
- JobDefinitionStep step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
-
- // when
- when(workCursor.isReductionStep())
- .thenReturn(true);
- when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
- .thenReturn(chunks.stream());
- when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
- doThrow(new RuntimeException(errorMsg))
- .when(myReductionStep).consume(any(ChunkExecutionDetails.class));
-
- // test
- JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
- workCursor,
- jobInstance,
- null
- );
-
- // verify
- assertFalse(result.isSuccessful());
- ArgumentCaptor chunkIdCaptor = ArgumentCaptor.forClass(String.class);
- ArgumentCaptor errorCaptor = ArgumentCaptor.forClass(String.class);
- verify(myJobPersistence, times(chunkIds.size()))
- .markWorkChunkAsFailed(chunkIdCaptor.capture(), errorCaptor.capture());
- List chunkIdsCaptured = chunkIdCaptor.getAllValues();
- List errorsCaptured = errorCaptor.getAllValues();
- for (int i = 0; i < chunkIds.size(); i++) {
- String cId = chunkIdsCaptured.get(i);
- String error = errorsCaptured.get(i);
-
- assertTrue(chunkIds.contains(cId));
- assertTrue(error.contains("Reduction step failed to execute chunk reduction for chunk"));
- }
- verify(myJobPersistence, never())
- .markWorkChunksWithStatusAndWipeData(anyString(), anyList(), any(), anyString());
- verify(myReductionStep, never())
- .run(any(), any());
- }
-
- @Test
- public void doExecution_reductionStepWithChunkFailures_marksChunkAsFailedButExecutesRestAsSuccess() {
- // setup
- List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
- List<WorkChunk> chunks = new ArrayList<>();
- for (String id : chunkIds) {
- chunks.add(createWorkChunk(id));
- }
- JobInstance jobInstance = getTestJobInstance();
- JobWorkCursor workCursor = mock(JobWorkCursor.class);
- JobDefinitionStep step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
-
- // when
- when(workCursor.isReductionStep())
- .thenReturn(true);
- when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
- .thenReturn(chunks.stream());
- when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
- when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
- .thenReturn(ChunkOutcome.SUCCESS())
- .thenReturn(new ChunkOutcome(ChunkOutcome.Status.FAIL));
- when(myReductionStep.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
- .thenReturn(RunOutcome.SUCCESS);
-
- // test
- JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
- workCursor,
- jobInstance,
- null
- );
-
- // verify
- assertFalse(result.isSuccessful());
- verify(myJobPersistence)
- .markWorkChunkAsFailed(eq(chunkIds.get(1)), anyString());
- ArgumentCaptor chunkListCaptor = ArgumentCaptor.forClass(List.class);
- verify(myJobPersistence)
- .markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
- chunkListCaptor.capture(),
- eq(StatusEnum.COMPLETED),
- any());
- List completedIds = chunkListCaptor.getValue();
- assertEquals(1, completedIds.size());
- assertEquals(chunkIds.get(0), completedIds.get(0));
- }
-
- @Test
- public void doExecution_reductionWithChunkAbort_marksAllFutureChunksAsFailedButPreviousAsSuccess() {
- // setup
- List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
- List<WorkChunk> chunks = new ArrayList<>();
- for (String id : chunkIds) {
- chunks.add(createWorkChunk(id));
- }
- JobInstance jobInstance = getTestJobInstance();
- JobWorkCursor workCursor = mock(JobWorkCursor.class);
- JobDefinitionStep step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
-
- // when
- when(workCursor.isReductionStep())
- .thenReturn(true);
- when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
- when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
- .thenReturn(chunks.stream());
- when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
- .thenReturn(ChunkOutcome.SUCCESS())
- .thenReturn(new ChunkOutcome(ChunkOutcome.Status.ABORT));
- when(myReductionStep.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
- .thenReturn(RunOutcome.SUCCESS);
-
- // test
- JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
- workCursor,
- jobInstance,
- null
- );
-
- // verification
- assertFalse(result.isSuccessful());
- ArgumentCaptor submittedListIds = ArgumentCaptor.forClass(List.class);
- ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(StatusEnum.class);
- verify(myJobPersistence, times(chunkIds.size()))
- .markWorkChunksWithStatusAndWipeData(
- eq(INSTANCE_ID),
- submittedListIds.capture(),
- statusCaptor.capture(),
- any()
- );
- assertEquals(2, submittedListIds.getAllValues().size());
- List list1 = submittedListIds.getAllValues().get(0);
- List list2 = submittedListIds.getAllValues().get(1);
- assertTrue(list1.contains(chunkIds.get(0)));
- assertTrue(list2.contains(chunkIds.get(1)));
-
- // assumes the order of which is called first
- // successes, then failures
- assertEquals(2, statusCaptor.getAllValues().size());
- List statuses = statusCaptor.getAllValues();
- assertEquals(StatusEnum.COMPLETED, statuses.get(0));
- assertEquals(StatusEnum.FAILED, statuses.get(1));
- }
@Test
public void doExecution_nonReductionIntermediateStepWithValidInput_executesAsExpected() {
@@ -437,7 +227,7 @@ public class WorkChunkProcessorTest {
// verify
assertTrue(result.isSuccessful());
verify(myJobPersistence)
- .markWorkChunkAsCompletedAndClearData(eq(chunk.getId()), anyInt());
+ .markWorkChunkAsCompletedAndClearData(any(), eq(chunk.getId()), anyInt());
assertTrue(myDataSink.myActualDataSink instanceof JobDataSink);
if (theRecoveredErrorsForDataSink > 0) {
@@ -616,14 +406,14 @@ public class WorkChunkProcessorTest {
.fetchAllWorkChunksForStepStream(anyString(), anyString());
}
- private JobInstance getTestJobInstance() {
+ static JobInstance getTestJobInstance() {
JobInstance instance = JobInstance.fromInstanceId(INSTANCE_ID);
instance.setParameters(new TestJobParameters());
return instance;
}
- private WorkChunk createWorkChunk(String theId) {
+ static WorkChunk createWorkChunk(String theId) {
WorkChunk chunk = new WorkChunk();
chunk.setInstanceId(INSTANCE_ID);
chunk.setId(theId);
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImplTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImplTest.java
index 063a799c61d..9fb02c0764c 100644
--- a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImplTest.java
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/maintenance/JobMaintenanceServiceImplTest.java
@@ -2,6 +2,7 @@ package ca.uhn.fhir.batch2.maintenance;
import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
import ca.uhn.fhir.batch2.channel.BatchJobSender;
import ca.uhn.fhir.batch2.coordinator.BaseBatch2Test;
@@ -82,8 +83,8 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
private ArgumentCaptor> myMessageCaptor;
@Captor
private ArgumentCaptor> myJobCompletionCaptor;
-
- private final JobInstance ourQueuedInstance = new JobInstance().setStatus(StatusEnum.QUEUED);
+ @Mock
+ private IReductionStepExecutorService myReductionStepExecutorService;
@BeforeEach
public void beforeEach() {
@@ -94,8 +95,8 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
myDaoConfig,
myJobDefinitionRegistry,
batchJobSender,
- myJobExecutorSvc
- );
+ myJobExecutorSvc,
+ myReductionStepExecutorService);
myDaoConfig.setJobFastTrackingEnabled(true);
}
@@ -107,9 +108,6 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
- when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
- .thenReturn(chunks.iterator());
-
mySvc.runMaintenancePass();
verify(myJobPersistence, never()).updateInstance(any());
@@ -126,6 +124,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(StatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
);
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
.thenReturn(chunks.iterator());
@@ -160,6 +159,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
JobInstance instance1 = createInstance();
instance1.setErrorMessage("This is an error message");
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
.thenReturn(chunks.iterator());
@@ -199,6 +199,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
JobInstance instance1 = createInstance();
instance1.setCurrentGatedStepId(STEP_1);
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
// Execute
mySvc.runMaintenancePass();
@@ -221,6 +222,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
instance.setStatus(StatusEnum.FAILED);
instance.setEndTime(parseTime("2001-01-01T12:12:12Z"));
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance));
mySvc.runMaintenancePass();
@@ -253,11 +255,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
);
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(t -> t.completionHandler(myCompletionHandler)));
- when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
- when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
- .thenReturn(chunks.iterator());
+ JobInstance instance1 = createInstance();
+ when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
+ when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
when(myJobPersistence.updateInstance(any())).thenReturn(true);
- when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(ourQueuedInstance));
+ when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance1));
// Execute
@@ -274,7 +276,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
assertEquals(0.25, instance.getCombinedRecordsProcessedPerSecond());
assertEquals(parseTime("2022-02-12T14:10:00-04:00"), instance.getEndTime());
- verify(myJobPersistence, times(1)).deleteChunks(eq(INSTANCE_ID));
+ verify(myJobPersistence, times(1)).deleteChunksAndMarkInstanceAsChunksPurged(eq(INSTANCE_ID));
verify(myCompletionHandler, times(1)).jobComplete(myJobCompletionCaptor.capture());
verifyNoMoreInteractions(myJobPersistence);
@@ -306,14 +308,14 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
);
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
- .thenReturn(chunks.iterator());
- when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(ourQueuedInstance));
+ .thenAnswer(t->chunks.iterator());
mySvc.runMaintenancePass();
- verify(myJobPersistence, times(2)).updateInstance(myInstanceCaptor.capture());
+ verify(myJobPersistence, times(3)).updateInstance(myInstanceCaptor.capture());
JobInstance instance = myInstanceCaptor.getAllValues().get(0);
assertEquals(0.8333333333333334, instance.getProgress());
@@ -323,7 +325,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
assertEquals(0.25, instance.getCombinedRecordsProcessedPerSecond());
assertEquals(parseTime("2022-02-12T14:10:00-04:00"), instance.getEndTime());
- verify(myJobPersistence, times(1)).deleteChunks(eq(INSTANCE_ID));
+ verify(myJobPersistence, times(1)).deleteChunksAndMarkInstanceAsChunksPurged(eq(INSTANCE_ID));
verifyNoMoreInteractions(myJobPersistence);
}
@@ -343,11 +345,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(StatusEnum.QUEUED).setId(CHUNK_ID_2)
);
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
- when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
- .thenReturn(chunks.iterator());
+ when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
JobInstance instance1 = createInstance();
instance1.setCurrentGatedStepId(STEP_1);
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
mySvc.runMaintenancePass();
@@ -373,11 +375,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(StatusEnum.QUEUED).setId(CHUNK_ID_2)
);
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
- when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
- .thenReturn(chunks.iterator());
+ when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
JobInstance instance1 = createInstance();
instance1.setCurrentGatedStepId(STEP_1);
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
+ when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
mySvc.runMaintenancePass();
mySvc.runMaintenancePass();
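
Several stubs in the hunks above move from thenReturn(chunks.iterator()) to thenAnswer(t -> chunks.iterator()). The difference matters once a test triggers more than one maintenance pass: thenReturn hands back the same Iterator object on every call, and it is exhausted after the first pass, while thenAnswer re-evaluates the lambda each time and so produces a fresh iterator. A standalone sketch of that behaviour; the ChunkRepo interface is a hypothetical stand-in for the work-chunk persistence API:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Iterator;
import java.util.List;

class IteratorStubbingDemo {

	// Hypothetical stand-in for the work-chunk persistence API.
	interface ChunkRepo {
		Iterator<String> fetchChunks();
	}

	public static void main(String[] args) {
		List<String> chunks = List.of("chunk-1", "chunk-2");

		ChunkRepo exhausted = mock(ChunkRepo.class);
		when(exhausted.fetchChunks()).thenReturn(chunks.iterator());
		exhausted.fetchChunks().forEachRemaining(c -> {});      // first pass drains the only iterator
		System.out.println(exhausted.fetchChunks().hasNext());  // false - a second pass sees nothing

		ChunkRepo fresh = mock(ChunkRepo.class);
		when(fresh.fetchChunks()).thenAnswer(t -> chunks.iterator());
		fresh.fetchChunks().forEachRemaining(c -> {});
		System.out.println(fresh.fetchChunks().hasNext());      // true - every call builds a new iterator
	}
}
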
diff --git a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdaterTest.java b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdaterTest.java
index a371783dbc5..3714931b797 100644
--- a/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdaterTest.java
+++ b/hapi-fhir-storage-batch2/src/test/java/ca/uhn/fhir/batch2/progress/JobInstanceStatusUpdaterTest.java
@@ -4,6 +4,7 @@ import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobInstance;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
@@ -21,6 +22,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
@@ -35,7 +37,8 @@ class JobInstanceStatusUpdaterTest {
IJobPersistence myJobPersistence;
@Mock
private JobDefinition myJobDefinition;
-
+ @Mock
+ private JobDefinitionRegistry myJobDefinitionRegistry;
@InjectMocks
JobInstanceStatusUpdater mySvc;
private JobInstance myInstance;
@@ -52,6 +55,8 @@ class JobInstanceStatusUpdaterTest {
myInstance.setParameters(myTestParameters);
myInstance.setErrorMessage(TEST_ERROR_MESSAGE);
myInstance.setErrorCount(TEST_ERROR_COUNT);
+
+ when(myJobDefinitionRegistry.getJobDefinitionOrThrowException(any())).thenReturn((JobDefinition) myJobDefinition);
}
@Test
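
The new @Mock JobDefinitionRegistry only reaches the class under test because @InjectMocks rebuilds JobInstanceStatusUpdater with the mocks declared in the test, after which the setUp stub gives getJobDefinitionOrThrowException(any()) a default answer. A minimal illustration of that wiring, using hypothetical Registry/Updater types rather than the real batch2 classes:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.when;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class InjectMocksWiringTest {

	// Hypothetical collaborator and subject, standing in for JobDefinitionRegistry
	// and JobInstanceStatusUpdater.
	interface Registry {
		String lookup(String theJobDefinitionId);
	}

	static class Updater {
		private final Registry myRegistry;

		Updater(Registry theRegistry) {
			myRegistry = theRegistry;
		}

		String describe(String theJobDefinitionId) {
			return "definition: " + myRegistry.lookup(theJobDefinitionId);
		}
	}

	@Mock
	private Registry myRegistry;

	@InjectMocks
	private Updater myUpdater; // constructed by Mockito with the mock passed in

	@Test
	void usesInjectedMock() {
		when(myRegistry.lookup(anyString())).thenReturn("bulk-export");
		assertEquals("definition: bulk-export", myUpdater.describe("job-1"));
	}
}
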
diff --git a/hapi-fhir-storage-cr/pom.xml b/hapi-fhir-storage-cr/pom.xml
index 9e4a8473234..441e2b2cfea 100644
--- a/hapi-fhir-storage-cr/pom.xml
+++ b/hapi-fhir-storage-cr/pom.xml
@@ -7,7 +7,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-storage-mdm/pom.xml b/hapi-fhir-storage-mdm/pom.xml
index cc78f101f09..b1a309b84ad 100644
--- a/hapi-fhir-storage-mdm/pom.xml
+++ b/hapi-fhir-storage-mdm/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
<modelVersion>4.0.0</modelVersion>
diff --git a/hapi-fhir-storage-test-utilities/pom.xml b/hapi-fhir-storage-test-utilities/pom.xml
index 7bd61c285e3..e42408f2911 100644
--- a/hapi-fhir-storage-test-utilities/pom.xml
+++ b/hapi-fhir-storage-test-utilities/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
<modelVersion>4.0.0</modelVersion>
diff --git a/hapi-fhir-storage/pom.xml b/hapi-fhir-storage/pom.xml
index c2976ed750d..9dd4a24c4ed 100644
--- a/hapi-fhir-storage/pom.xml
+++ b/hapi-fhir-storage/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/batch/models/Batch2JobStartResponse.java b/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/batch/models/Batch2JobStartResponse.java
index 0b60f0ef43d..aefe3c8ed4f 100644
--- a/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/batch/models/Batch2JobStartResponse.java
+++ b/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/batch/models/Batch2JobStartResponse.java
@@ -25,6 +25,7 @@ public class Batch2JobStartResponse {
/**
* The job id
*/
+ // TODO: JA rename this field - should be instanceId, not jobId
private String myJobId;
/**
diff --git a/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/dao/tx/NonTransactionalHapiTransactionService.java b/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/dao/tx/NonTransactionalHapiTransactionService.java
new file mode 100644
index 00000000000..42df6d1ef60
--- /dev/null
+++ b/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/dao/tx/NonTransactionalHapiTransactionService.java
@@ -0,0 +1,39 @@
+package ca.uhn.fhir.jpa.dao.tx;
+
+/*-
+ * #%L
+ * HAPI FHIR Storage api
+ * %%
+ * Copyright (C) 2014 - 2023 Smile CDR, Inc.
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * #L%
+ */
+
+import org.springframework.transaction.support.TransactionCallback;
+
+import javax.annotation.Nullable;
+
+/**
+ * A transaction service implementation that does not actually
+ * wrap any transactions. This is mostly intended for tests but
+ * could be used in non-transactional systems too.
+ */
+public class NonTransactionalHapiTransactionService extends HapiTransactionService {
+
+ @Nullable
+ @Override
+ protected <T> T doExecute(ExecutionBuilder theExecutionBuilder, TransactionCallback<T> theCallback) {
+ return theCallback.doInTransaction(null);
+ }
+}
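
The override above is the whole trick: instead of opening a transaction it simply invokes the callback with a null TransactionStatus and passes its return value straight through. A tiny stand-alone sketch of the same pattern; the helper below is illustrative and does not use the real HapiTransactionService API:

import org.springframework.transaction.support.TransactionCallback;

class NoOpTxDemo {

	// Minimal stand-in mirroring the override above: run the callback inline,
	// with no physical transaction and therefore a null TransactionStatus.
	static <T> T executeWithoutTransaction(TransactionCallback<T> theCallback) {
		return theCallback.doInTransaction(null);
	}

	public static void main(String[] args) {
		Integer result = executeWithoutTransaction(theStatus -> {
			// theStatus is null here - there is nothing to commit or roll back
			return 40 + 2;
		});
		System.out.println(result); // prints 42
	}
}
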
diff --git a/hapi-fhir-structures-dstu2.1/pom.xml b/hapi-fhir-structures-dstu2.1/pom.xml
index 7dccdb8f508..b8db763eeaa 100644
--- a/hapi-fhir-structures-dstu2.1/pom.xml
+++ b/hapi-fhir-structures-dstu2.1/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-dstu2/pom.xml b/hapi-fhir-structures-dstu2/pom.xml
index e6cb0440500..7354e7148f0 100644
--- a/hapi-fhir-structures-dstu2/pom.xml
+++ b/hapi-fhir-structures-dstu2/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-dstu3/pom.xml b/hapi-fhir-structures-dstu3/pom.xml
index de297e5cae4..8c162c2b5ce 100644
--- a/hapi-fhir-structures-dstu3/pom.xml
+++ b/hapi-fhir-structures-dstu3/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-hl7org-dstu2/pom.xml b/hapi-fhir-structures-hl7org-dstu2/pom.xml
index 1d8d5d06f53..c4f3e3d1e4f 100644
--- a/hapi-fhir-structures-hl7org-dstu2/pom.xml
+++ b/hapi-fhir-structures-hl7org-dstu2/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-r4/pom.xml b/hapi-fhir-structures-r4/pom.xml
index b270bb77b41..32d4ede00a0 100644
--- a/hapi-fhir-structures-r4/pom.xml
+++ b/hapi-fhir-structures-r4/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-r4b/pom.xml b/hapi-fhir-structures-r4b/pom.xml
index b331d032c5a..3941eaa8289 100644
--- a/hapi-fhir-structures-r4b/pom.xml
+++ b/hapi-fhir-structures-r4b/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-structures-r5/pom.xml b/hapi-fhir-structures-r5/pom.xml
index 44cc87c0a9e..5be69885b2c 100644
--- a/hapi-fhir-structures-r5/pom.xml
+++ b/hapi-fhir-structures-r5/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-test-utilities/pom.xml b/hapi-fhir-test-utilities/pom.xml
index 62048cde28c..231ea0cfa6d 100644
--- a/hapi-fhir-test-utilities/pom.xml
+++ b/hapi-fhir-test-utilities/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-testpage-overlay/pom.xml b/hapi-fhir-testpage-overlay/pom.xml
index 6f1a304f513..789be28bc48 100644
--- a/hapi-fhir-testpage-overlay/pom.xml
+++ b/hapi-fhir-testpage-overlay/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-dstu2.1/pom.xml b/hapi-fhir-validation-resources-dstu2.1/pom.xml
index ec4dc58f4d1..471b2f57ad4 100644
--- a/hapi-fhir-validation-resources-dstu2.1/pom.xml
+++ b/hapi-fhir-validation-resources-dstu2.1/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-dstu2/pom.xml b/hapi-fhir-validation-resources-dstu2/pom.xml
index 07fc86cc7c7..b037b9f74fb 100644
--- a/hapi-fhir-validation-resources-dstu2/pom.xml
+++ b/hapi-fhir-validation-resources-dstu2/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-dstu3/pom.xml b/hapi-fhir-validation-resources-dstu3/pom.xml
index f7ee4375728..476825f40a1 100644
--- a/hapi-fhir-validation-resources-dstu3/pom.xml
+++ b/hapi-fhir-validation-resources-dstu3/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-r4/pom.xml b/hapi-fhir-validation-resources-r4/pom.xml
index 93309f4fb41..115893f5210 100644
--- a/hapi-fhir-validation-resources-r4/pom.xml
+++ b/hapi-fhir-validation-resources-r4/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-r4b/pom.xml b/hapi-fhir-validation-resources-r4b/pom.xml
index 9b1ad01392f..3422ccfed3a 100644
--- a/hapi-fhir-validation-resources-r4b/pom.xml
+++ b/hapi-fhir-validation-resources-r4b/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation-resources-r5/pom.xml b/hapi-fhir-validation-resources-r5/pom.xml
index f3e25b37954..92dd788a9ad 100644
--- a/hapi-fhir-validation-resources-r5/pom.xml
+++ b/hapi-fhir-validation-resources-r5/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-fhir-validation/pom.xml b/hapi-fhir-validation/pom.xml
index b5d72e760ee..b11bd5692d0 100644
--- a/hapi-fhir-validation/pom.xml
+++ b/hapi-fhir-validation/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
diff --git a/hapi-tinder-plugin/pom.xml b/hapi-tinder-plugin/pom.xml
index f522a045ad8..79c8cf38630 100644
--- a/hapi-tinder-plugin/pom.xml
+++ b/hapi-tinder-plugin/pom.xml
@@ -5,7 +5,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/hapi-tinder-test/pom.xml b/hapi-tinder-test/pom.xml
index dd7f4020c26..ca87adf8dfd 100644
--- a/hapi-tinder-test/pom.xml
+++ b/hapi-tinder-test/pom.xml
@@ -4,7 +4,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
diff --git a/pom.xml b/pom.xml
index bac21a2986a..d61631903c1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -7,7 +7,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
<packaging>pom</packaging>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
<name>HAPI-FHIR</name>
<description>An open-source implementation of the FHIR specification in Java.</description>
<url>https://hapifhir.io</url>
@@ -2152,7 +2152,7 @@
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-checkstyle</artifactId>
- <version>6.4.2</version>
+ <version>6.4.2-SNAPSHOT</version>
@@ -3033,20 +3033,6 @@
- <profile>
- <id>LGTM</id>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-enforcer-plugin</artifactId>
- <configuration>
- <skip>true</skip>
- </configuration>
- </plugin>
- </plugins>
- </build>
- </profile>