Improve bulk export performance (#4569)
* Improve performance on bulk export
* Add changelog
* Start working on cleaned up reducer
* Clean up batch calls
* Work on issues
* Build fixes
* Test fixing
* Test fixes
* Work on progress
* Add changelog
* Build tweak
* Fixes
* Fixes
* Fix compile
* Test fixes
* Bump to 6.4.2-SNAPSHOT
* Fix compile

---------

Co-authored-by: Tadgh <garygrantgraham@gmail.com>
parent e14b289bc7
commit 3a8b42dc39
@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-fhir</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
@@ -115,7 +115,9 @@ public enum VersionEnum {
 	V6_3_0,
 	V6_4_0,
 	V6_4_1,
-	V6_4_2
+	V6_4_2,
+	V6_5_0,
+	V6_6_0
 	;
 
 	public static VersionEnum latestVersion() {
@@ -4,14 +4,14 @@
 	<modelVersion>4.0.0</modelVersion>
 	<groupId>ca.uhn.hapi.fhir</groupId>
 	<artifactId>hapi-fhir-bom</artifactId>
-	<version>6.4.2</version>
+	<version>6.4.2-SNAPSHOT</version>
 	<packaging>pom</packaging>
 	<name>HAPI FHIR BOM</name>
 
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-fhir</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>

@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-fhir-cli</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../../hapi-deployable-pom</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-fhir</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>

@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-fhir</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
@@ -0,0 +1,6 @@
+---
+type: perf
+issue: 4569
+backport: 6.4.1
+title: "A race condition in the Bulk Export module sometimes resulted in bulk export jobs producing completion
+  reports that did not contain all generated output files. This has been corrected."
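Editor's note on the race condition described above: a completion report is only trustworthy if it is assembled after every work chunk of the final step has been marked COMPLETED. The sketch below is illustrative only and is not the code changed in this commit; it reuses the IJobPersistence, WorkChunk, and StatusEnum types visible elsewhere in this diff, and the CompletionReportGate class name is hypothetical.

import java.util.Iterator;

import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;

// Hypothetical helper, for illustration only: gate report generation on all chunks being COMPLETED.
class CompletionReportGate {

	static boolean readyForCompletionReport(IJobPersistence thePersistence, String theInstanceId) {
		// Iterate the chunks without their data payloads; any non-COMPLETED chunk means
		// an output file may still be written, so building the report now could miss it.
		Iterator<WorkChunk> chunks = thePersistence.fetchAllWorkChunksIterator(theInstanceId, false);
		while (chunks.hasNext()) {
			if (chunks.next().getStatus() != StatusEnum.COMPLETED) {
				return false;
			}
		}
		return true;
	}
}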
@@ -0,0 +1,7 @@
+---
+type: perf
+issue: 4569
+backport: 6.4.1
+title: "An inefficient query in the JPA Bulk Export module was optimized. This query caused exports for resources
+  containing tags/security labels/profiles to perform a number of redundant database lookups, so this type of
+  export should be much faster now."
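Editor's note on the query optimization described above: the general shape of such a fix is to replace a per-resource tag lookup with a single batched lookup for the whole chunk of exported resources. The repository below is a hypothetical sketch (ExampleResourceTag and its fields are not real HAPI FHIR classes); it only illustrates the N-queries-versus-one-query difference, not the actual optimized query.

import java.util.Collection;
import java.util.List;

import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;

// Hypothetical repository, for illustration only.
public interface ExampleResourceTagRepository extends JpaRepository<ExampleResourceTag, Long> {

	// Inefficient pattern: one SELECT per exported resource, i.e. N round trips per chunk.
	@Query("SELECT t FROM ExampleResourceTag t WHERE t.myResourceId = :id")
	List<ExampleResourceTag> findByResourceId(@Param("id") Long theResourceId);

	// Optimized pattern: load the tags for every resource in the chunk with a single SELECT ... IN (...).
	@Query("SELECT t FROM ExampleResourceTag t WHERE t.myResourceId IN :ids")
	List<ExampleResourceTag> findByResourceIds(@Param("ids") Collection<Long> theResourceIds);
}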
@@ -0,0 +1,3 @@
+---
+release-date: "TBD"
+codename: "TBD"
@@ -11,7 +11,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
 	<modelVersion>4.0.0</modelVersion>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
@@ -47,6 +47,7 @@ import org.springframework.transaction.PlatformTransactionManager;
 import org.springframework.transaction.TransactionDefinition;
 import org.springframework.transaction.annotation.Propagation;
 import org.springframework.transaction.annotation.Transactional;
+import org.springframework.transaction.support.TransactionSynchronizationManager;
 import org.springframework.transaction.support.TransactionTemplate;
 
 import javax.annotation.Nonnull;

@@ -61,6 +62,7 @@ import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity.ERROR_MSG_MAX_LENGTH;
 import static org.apache.commons.lang3.StringUtils.isBlank;
 
 public class JpaJobPersistenceImpl implements IJobPersistence {
@@ -235,7 +237,8 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
 	@Override
 	@Transactional(propagation = Propagation.REQUIRES_NEW)
 	public void markWorkChunkAsErroredAndIncrementErrorCount(String theChunkId, String theErrorMessage) {
-		myWorkChunkRepository.updateChunkStatusAndIncrementErrorCountForEndError(theChunkId, new Date(), theErrorMessage, StatusEnum.ERRORED);
+		String errorMessage = truncateErrorMessage(theErrorMessage);
+		myWorkChunkRepository.updateChunkStatusAndIncrementErrorCountForEndError(theChunkId, new Date(), errorMessage, StatusEnum.ERRORED);
 	}
 
 	@Override
@@ -251,28 +254,39 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
 	@Transactional(propagation = Propagation.REQUIRES_NEW)
 	public void markWorkChunkAsFailed(String theChunkId, String theErrorMessage) {
 		ourLog.info("Marking chunk {} as failed with message: {}", theChunkId, theErrorMessage);
-		String errorMessage;
-		if (theErrorMessage.length() > Batch2WorkChunkEntity.ERROR_MSG_MAX_LENGTH) {
-			ourLog.warn("Truncating error message that is too long to store in database: {}", theErrorMessage);
-			errorMessage = theErrorMessage.substring(0, Batch2WorkChunkEntity.ERROR_MSG_MAX_LENGTH);
-		} else {
-			errorMessage = theErrorMessage;
-		}
+		String errorMessage = truncateErrorMessage(theErrorMessage);
 		myWorkChunkRepository.updateChunkStatusAndIncrementErrorCountForEndError(theChunkId, new Date(), errorMessage, StatusEnum.FAILED);
 	}
 
-	@Override
-	@Transactional(propagation = Propagation.REQUIRES_NEW)
-	public void markWorkChunkAsCompletedAndClearData(String theChunkId, int theRecordsProcessed) {
-		myWorkChunkRepository.updateChunkStatusAndClearDataForEndSuccess(theChunkId, new Date(), theRecordsProcessed, StatusEnum.COMPLETED);
+	@Nonnull
+	private static String truncateErrorMessage(String theErrorMessage) {
+		String errorMessage;
+		if (theErrorMessage != null && theErrorMessage.length() > ERROR_MSG_MAX_LENGTH) {
+			ourLog.warn("Truncating error message that is too long to store in database: {}", theErrorMessage);
+			errorMessage = theErrorMessage.substring(0, ERROR_MSG_MAX_LENGTH);
+		} else {
+			errorMessage = theErrorMessage;
+		}
+		return errorMessage;
+	}
+
+	@Override
+	@Transactional(propagation = Propagation.REQUIRES_NEW)
-	public void markWorkChunksWithStatusAndWipeData(String theInstanceId, List<String> theChunkIds, StatusEnum theStatus, String theErrorMsg) {
+	public void markWorkChunkAsCompletedAndClearData(String theInstanceId, String theChunkId, int theRecordsProcessed) {
+		StatusEnum newStatus = StatusEnum.COMPLETED;
+		ourLog.debug("Marking chunk {} for instance {} to status {}", theChunkId, theInstanceId, newStatus);
+		myWorkChunkRepository.updateChunkStatusAndClearDataForEndSuccess(theChunkId, new Date(), theRecordsProcessed, newStatus);
+	}
+
+	@Override
+	public void markWorkChunksWithStatusAndWipeData(String theInstanceId, List<String> theChunkIds, StatusEnum theStatus, String theErrorMessage) {
+		assert TransactionSynchronizationManager.isActualTransactionActive();
+
 		ourLog.debug("Marking all chunks for instance {} to status {}", theInstanceId, theStatus);
+		String errorMessage = truncateErrorMessage(theErrorMessage);
 		List<List<String>> listOfListOfIds = ListUtils.partition(theChunkIds, 100);
 		for (List<String> idList : listOfListOfIds) {
-			myWorkChunkRepository.updateAllChunksForInstanceStatusClearDataAndSetError(idList, new Date(), theStatus, theErrorMsg);
+			myWorkChunkRepository.updateAllChunksForInstanceStatusClearDataAndSetError(idList, new Date(), theStatus, errorMessage);
 		}
 	}
 
@@ -285,6 +299,13 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
 	@Override
 	@Transactional(propagation = Propagation.REQUIRES_NEW)
 	public boolean canAdvanceInstanceToNextStep(String theInstanceId, String theCurrentStepId) {
+		Optional<Batch2JobInstanceEntity> instance = myJobInstanceRepository.findById(theInstanceId);
+		if (!instance.isPresent()) {
+			return false;
+		}
+		if (instance.get().getStatus().isEnded()) {
+			return false;
+		}
 		List<StatusEnum> statusesForStep = myWorkChunkRepository.getDistinctStatusesForStep(theInstanceId, theCurrentStepId);
 		ourLog.debug("Checking whether gated job can advanced to next step. [instanceId={}, currentStepId={}, statusesForStep={}]", theInstanceId, theCurrentStepId, statusesForStep);
 		return statusesForStep.stream().noneMatch(StatusEnum::isIncomplete) && statusesForStep.stream().anyMatch(status -> status == StatusEnum.COMPLETED);
@@ -314,6 +335,11 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
 		return myTxTemplate.execute(tx -> myWorkChunkRepository.fetchAllChunkIdsForStepWithStatus(theInstanceId, theStepId, theStatusEnum));
 	}
 
+	@Override
+	public void updateInstanceUpdateTime(String theInstanceId) {
+		myJobInstanceRepository.updateInstanceUpdateTime(theInstanceId, new Date());
+	}
+
 	private void fetchChunksForStep(String theInstanceId, String theStepId, int thePageSize, int thePageIndex, Consumer<WorkChunk> theConsumer) {
 		myTxTemplate.executeWithoutResult(tx -> {
 			List<Batch2WorkChunkEntity> chunks = myWorkChunkRepository.fetchChunksForStep(PageRequest.of(thePageIndex, thePageSize), theInstanceId, theStepId);
@@ -380,6 +406,7 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
+		instanceEntity.setReport(theInstance.getReport());
 
 		myJobInstanceRepository.save(instanceEntity);
 
 		return recordsChangedByStatusUpdate > 0;
 	}
 
@@ -393,8 +420,9 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
 
 	@Override
 	@Transactional(propagation = Propagation.REQUIRES_NEW)
-	public void deleteChunks(String theInstanceId) {
+	public void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId) {
 		ourLog.info("Deleting all chunks for instance ID: {}", theInstanceId);
+		myJobInstanceRepository.updateWorkChunksPurgedTrue(theInstanceId);
 		myWorkChunkRepository.deleteAllForInstance(theInstanceId);
 	}
 
@@ -38,13 +38,17 @@ public interface IBatch2JobInstanceRepository extends JpaRepository<Batch2JobIns
 	@Query("UPDATE Batch2JobInstanceEntity e SET e.myStatus = :status WHERE e.myId = :id and e.myStatus <> :status")
 	int updateInstanceStatus(@Param("id") String theInstanceId, @Param("status") StatusEnum theStatus);
 
+	@Modifying
+	@Query("UPDATE Batch2JobInstanceEntity e SET e.myUpdateTime = :updated WHERE e.myId = :id")
+	int updateInstanceUpdateTime(@Param("id") String theInstanceId, @Param("updated") Date theUpdated);
+
 	@Modifying
 	@Query("UPDATE Batch2JobInstanceEntity e SET e.myCancelled = :cancelled WHERE e.myId = :id")
 	int updateInstanceCancelled(@Param("id") String theInstanceId, @Param("cancelled") boolean theCancelled);
 
 	@Modifying
 	@Query("UPDATE Batch2JobInstanceEntity e SET e.myCurrentGatedStepId = :currentGatedStepId WHERE e.myId = :id")
 	void updateInstanceCurrentGatedStepId(@Param("id") String theInstanceId, @Param("currentGatedStepId") String theCurrentGatedStepId);
 	@Query("UPDATE Batch2JobInstanceEntity e SET e.myWorkChunksPurged = true WHERE e.myId = :id")
 	int updateWorkChunksPurgedTrue(@Param("id") String theInstanceId);
 
 	@Query("SELECT b from Batch2JobInstanceEntity b WHERE b.myDefinitionId = :defId AND b.myParamsJson = :params AND b.myStatus IN( :stats )")
 	List<Batch2JobInstanceEntity> findInstancesByJobIdParamsAndStatus(
@@ -116,7 +116,7 @@ class JpaJobPersistenceImplTest {
 		String jobId = "jobid";
 
 		// test
-		mySvc.deleteChunks(jobId);
+		mySvc.deleteChunksAndMarkInstanceAsChunksPurged(jobId);
 
 		// verify
 		verify(myWorkChunkRepository)
@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -3,7 +3,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>ca.uhn.hapi.fhir</groupId>
 		<artifactId>hapi-deployable-pom</artifactId>
-		<version>6.4.2</version>
+		<version>6.4.2-SNAPSHOT</version>
 		<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
 	</parent>
@@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -55,6 +56,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 	public static final int JOB_DEF_VER = 1;
 	public static final int SEQUENCE_NUMBER = 1;
 	public static final String CHUNK_DATA = "{\"key\":\"value\"}";
+	public static final String INSTANCE_ID = "instance-id";
 
 	@Autowired
 	private IJobPersistence mySvc;
@@ -102,7 +104,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 
 		// Execute
 
-		mySvc.deleteChunks(instanceId);
+		mySvc.deleteChunksAndMarkInstanceAsChunksPurged(instanceId);
 
 		// Verify
 
@@ -216,19 +218,6 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 			.collect(Collectors.toUnmodifiableSet()));
 	}
 
-	/**
-	 * Returns a set of statuses, and whether they should be successfully picked up and started by a consumer.
-	 * @return
-	 */
-	public static List<Arguments> provideStatuses() {
-		return List.of(
-			Arguments.of(StatusEnum.QUEUED, true),
-			Arguments.of(StatusEnum.IN_PROGRESS, true),
-			Arguments.of(StatusEnum.ERRORED, true),
-			Arguments.of(StatusEnum.FAILED, false),
-			Arguments.of(StatusEnum.COMPLETED, false)
-		);
-	}
 	@ParameterizedTest
 	@MethodSource("provideStatuses")
 	public void testStartChunkOnlyWorksOnValidChunks(StatusEnum theStatus, boolean theShouldBeStartedByConsumer) {
@@ -236,7 +225,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 		JobInstance instance = createInstance();
 		String instanceId = mySvc.storeNewInstance(instance);
 		storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, CHUNK_DATA);
-		BatchWorkChunk batchWorkChunk = new BatchWorkChunk(JOB_DEFINITION_ID, JOB_DEF_VER, TARGET_STEP_ID, instanceId,0, CHUNK_DATA);
+		BatchWorkChunk batchWorkChunk = new BatchWorkChunk(JOB_DEFINITION_ID, JOB_DEF_VER, TARGET_STEP_ID, instanceId, 0, CHUNK_DATA);
 		String chunkId = mySvc.storeWorkChunk(batchWorkChunk);
 		Optional<Batch2WorkChunkEntity> byId = myWorkChunkRepository.findById(chunkId);
 		Batch2WorkChunkEntity entity = byId.get();
@@ -335,6 +324,24 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 			empty());
 	}
 
+	@Test
+	public void testUpdateTime() {
+		// Setup
+		JobInstance instance = createInstance();
+		String instanceId = mySvc.storeNewInstance(instance);
+
+		Date updateTime = runInTransaction(() -> new Date(myJobInstanceRepository.findById(instanceId).orElseThrow().getUpdateTime().getTime()));
+
+		sleepUntilTimeChanges();
+
+		// Test
+		runInTransaction(() -> mySvc.updateInstanceUpdateTime(instanceId));
+
+		// Verify
+		Date updateTime2 = runInTransaction(() -> new Date(myJobInstanceRepository.findById(instanceId).orElseThrow().getUpdateTime().getTime()));
+		assertNotEquals(updateTime, updateTime2);
+	}
+
 	@Test
 	public void testFetchUnknownWork() {
 		assertFalse(myWorkChunkRepository.findById("FOO").isPresent());
@@ -393,7 +400,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 
 		sleepUntilTimeChanges();
 
-		mySvc.markWorkChunkAsCompletedAndClearData(chunkId, 50);
+		mySvc.markWorkChunkAsCompletedAndClearData(INSTANCE_ID, chunkId, 50);
 		runInTransaction(() -> {
 			Batch2WorkChunkEntity entity = myWorkChunkRepository.findById(chunkId).orElseThrow(IllegalArgumentException::new);
 			assertEquals(StatusEnum.COMPLETED, entity.getStatus());
@@ -427,13 +434,14 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 		assertEquals(1, chunks.size());
 		assertEquals(5, chunks.get(0).getErrorCount());
 	}
 
 	@Test
 	public void testGatedAdvancementByStatus() {
 		// Setup
 		JobInstance instance = createInstance();
 		String instanceId = mySvc.storeNewInstance(instance);
 		String chunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
-		mySvc.markWorkChunkAsCompletedAndClearData(chunkId, 0);
+		mySvc.markWorkChunkAsCompletedAndClearData(INSTANCE_ID, chunkId, 0);
 
 		boolean canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
 		assertTrue(canAdvance);
@@ -445,18 +453,18 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 		assertFalse(canAdvance);
 
 		//Toggle it to complete
-		mySvc.markWorkChunkAsCompletedAndClearData(newChunkId, 0);
+		mySvc.markWorkChunkAsCompletedAndClearData(INSTANCE_ID, newChunkId, 0);
 		canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
 		assertTrue(canAdvance);
 
 		//Create a new chunk and set it in progress.
-		String newerChunkId= storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
+		String newerChunkId = storeWorkChunk(DEF_CHUNK_ID, STEP_CHUNK_ID, instanceId, SEQUENCE_NUMBER, null);
 		mySvc.fetchWorkChunkSetStartTimeAndMarkInProgress(newerChunkId);
 		canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
 		assertFalse(canAdvance);
 
 		//Toggle IN_PROGRESS to complete
-		mySvc.markWorkChunkAsCompletedAndClearData(newerChunkId, 0);
+		mySvc.markWorkChunkAsCompletedAndClearData(INSTANCE_ID, newerChunkId, 0);
 		canAdvance = mySvc.canAdvanceInstanceToNextStep(instanceId, STEP_CHUNK_ID);
 		assertTrue(canAdvance);
 	}
@@ -609,7 +617,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 			chunkIds.add(id);
 		}
 
-		mySvc.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(), chunkIds, StatusEnum.COMPLETED, null);
+		runInTransaction(() -> mySvc.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(), chunkIds, StatusEnum.COMPLETED, null));
 
 		Iterator<WorkChunk> reducedChunks = mySvc.fetchAllWorkChunksIterator(instanceId, true);
 
@@ -631,7 +639,6 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 		return instance;
 	}
 
-
 	@Nonnull
 	private String storeJobInstanceAndUpdateWithEndTime(StatusEnum theStatus, int minutes) {
 		final JobInstance jobInstance = new JobInstance();
@@ -656,4 +663,17 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
 
 		return id;
 	}
+
+	/**
+	 * Returns a set of statuses, and whether they should be successfully picked up and started by a consumer.
+	 */
+	public static List<Arguments> provideStatuses() {
+		return List.of(
+			Arguments.of(StatusEnum.QUEUED, true),
+			Arguments.of(StatusEnum.IN_PROGRESS, true),
+			Arguments.of(StatusEnum.ERRORED, true),
+			Arguments.of(StatusEnum.FAILED, false),
+			Arguments.of(StatusEnum.COMPLETED, false)
+		);
+	}
 }
@ -0,0 +1,225 @@
|
|||
package ca.uhn.fhir.jpa.bulk;
|
||||
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
|
||||
import ca.uhn.fhir.jpa.api.svc.IBatch2JobRunner;
|
||||
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
|
||||
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
|
||||
import ca.uhn.fhir.jpa.util.BulkExportUtils;
|
||||
import ca.uhn.fhir.rest.api.Constants;
|
||||
import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
|
||||
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
|
||||
import ca.uhn.fhir.util.JsonUtil;
|
||||
import com.google.common.collect.Sets;
|
||||
import org.hl7.fhir.r4.model.Binary;
|
||||
import org.hl7.fhir.r4.model.Enumerations;
|
||||
import org.hl7.fhir.r4.model.Group;
|
||||
import org.hl7.fhir.r4.model.IdType;
|
||||
import org.hl7.fhir.r4.model.Patient;
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.Disabled;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.StringReader;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.emptyOrNullString;
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.junit.jupiter.api.Assertions.fail;
|
||||
|
||||
/**
|
||||
* A test to poke at our job framework and induce errors.
|
||||
*/
|
||||
public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
|
||||
private static final Logger ourLog = LoggerFactory.getLogger(BulkDataErrorAbuseTest.class);
|
||||
|
||||
@Autowired
|
||||
private DaoConfig myDaoConfig;
|
||||
|
||||
@Autowired
|
||||
private IBatch2JobRunner myJobRunner;
|
||||
|
||||
@AfterEach
|
||||
void afterEach() {
|
||||
myDaoConfig.setIndexMissingFields(DaoConfig.IndexEnabledEnum.DISABLED);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGroupBulkExportNotInGroup_DoesNotShowUp() throws InterruptedException, ExecutionException {
|
||||
duAbuseTest(100);
|
||||
}
|
||||
|
||||
/**
|
||||
* This test is disabled because it never actually exits. Run it if you want to ensure
|
||||
* that changes to the Bulk Export Batch2 task haven't affected our ability to successfully
|
||||
* run endless parallel jobs. If you run it for a few minutes and it never stops on its own,
|
||||
* you are good.
|
||||
* <p>
|
||||
* The enabled test above called {@link #testGroupBulkExportNotInGroup_DoesNotShowUp()} does
|
||||
* run with the build and runs 100 jobs.
|
||||
*/
|
||||
@Test
|
||||
@Disabled
|
||||
public void testNonStopAbuseBatch2BulkExportStressTest() throws InterruptedException, ExecutionException {
|
||||
duAbuseTest(Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
private void duAbuseTest(int taskExecutions) throws InterruptedException, ExecutionException {
|
||||
// Create some resources
|
||||
Patient patient = new Patient();
|
||||
patient.setId("PING1");
|
||||
patient.setGender(Enumerations.AdministrativeGender.FEMALE);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("PING2");
|
||||
patient.setGender(Enumerations.AdministrativeGender.MALE);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("PNING3");
|
||||
patient.setGender(Enumerations.AdministrativeGender.MALE);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
|
||||
Group group = new Group();
|
||||
group.setId("Group/G2");
|
||||
group.setActive(true);
|
||||
group.addMember().getEntity().setReference("Patient/PING1");
|
||||
group.addMember().getEntity().setReference("Patient/PING2");
|
||||
myClient.update().resource(group).execute();
|
||||
|
||||
// set the export options
|
||||
BulkDataExportOptions options = new BulkDataExportOptions();
|
||||
options.setResourceTypes(Sets.newHashSet("Patient"));
|
||||
options.setGroupId(new IdType("Group", "G2"));
|
||||
options.setFilters(new HashSet<>());
|
||||
options.setExportStyle(BulkDataExportOptions.ExportStyle.GROUP);
|
||||
options.setOutputFormat(Constants.CT_FHIR_NDJSON);
|
||||
|
||||
BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
|
||||
ExecutorService executorService = new ThreadPoolExecutor(10, 10,
|
||||
0L, TimeUnit.MILLISECONDS,
|
||||
workQueue);
|
||||
|
||||
ourLog.info("Starting task creation");
|
||||
|
||||
List<Future<Boolean>> futures = new ArrayList<>();
|
||||
for (int i = 0; i < taskExecutions; i++) {
|
||||
futures.add(executorService.submit(() -> {
|
||||
String instanceId = null;
|
||||
try {
|
||||
instanceId = startJob(options);
|
||||
|
||||
// Run a scheduled pass to build the export
|
||||
myBatch2JobHelper.awaitJobCompletion(instanceId, 60);
|
||||
|
||||
verifyBulkExportResults(instanceId, List.of("Patient/PING1", "Patient/PING2"), Collections.singletonList("Patient/PNING3"));
|
||||
|
||||
return true;
|
||||
} catch (Throwable theError) {
|
||||
ourLog.error("Caught an error during processing instance {}", instanceId, theError);
|
||||
throw new InternalErrorException("Caught an error during processing instance " + instanceId, theError);
|
||||
}
|
||||
}));
|
||||
|
||||
// Don't let the list of futures grow so big we run out of memory
|
||||
if (futures.size() > 200) {
|
||||
while (futures.size() > 100) {
|
||||
// This should always return true, but it'll throw an exception if we failed
|
||||
assertTrue(futures.remove(0).get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ourLog.info("Done creating tasks, waiting for task completion");
|
||||
|
||||
for (var next : futures) {
|
||||
// This should always return true, but it'll throw an exception if we failed
|
||||
assertTrue(next.get());
|
||||
}
|
||||
|
||||
ourLog.info("Finished task execution");
|
||||
}
|
||||
|
||||
|
||||
private void verifyBulkExportResults(String theInstanceId, List<String> theContainedList, List<String> theExcludedList) {
|
||||
// Iterate over the files
|
||||
String report = myJobRunner.getJobInfo(theInstanceId).getReport();
|
||||
ourLog.debug("Export job {} report: {}", theInstanceId, report);
|
||||
if (!theContainedList.isEmpty()) {
|
||||
assertThat("report for instance " + theInstanceId + " is empty", report, not(emptyOrNullString()));
|
||||
}
|
||||
BulkExportJobResults results = JsonUtil.deserialize(report, BulkExportJobResults.class);
|
||||
|
||||
Set<String> foundIds = new HashSet<>();
|
||||
for (Map.Entry<String, List<String>> file : results.getResourceTypeToBinaryIds().entrySet()) {
|
||||
String resourceType = file.getKey();
|
||||
List<String> binaryIds = file.getValue();
|
||||
for (var nextBinaryId : binaryIds) {
|
||||
|
||||
Binary binary = myBinaryDao.read(new IdType(nextBinaryId), mySrd);
|
||||
assertEquals(Constants.CT_FHIR_NDJSON, binary.getContentType());
|
||||
|
||||
String nextNdJsonFileContent = new String(binary.getContent(), Constants.CHARSET_UTF8);
|
||||
ourLog.trace("Export job {} file {} contents: {}", theInstanceId, nextBinaryId, nextNdJsonFileContent);
|
||||
|
||||
List<String> lines = new BufferedReader(new StringReader(nextNdJsonFileContent))
|
||||
.lines().toList();
|
||||
ourLog.debug("Export job {} file {} line-count: {}", theInstanceId, nextBinaryId, lines.size());
|
||||
|
||||
lines.stream()
|
||||
.map(line -> myFhirContext.newJsonParser().parseResource(line))
|
||||
.map(r -> r.getIdElement().toUnqualifiedVersionless())
|
||||
.forEach(nextId -> {
|
||||
if (!resourceType.equals(nextId.getResourceType())) {
|
||||
fail("Found resource of type " + nextId.getResourceType() + " in file for type " + resourceType);
|
||||
} else {
|
||||
if (!foundIds.add(nextId.getValue())) {
|
||||
fail("Found duplicate ID: " + nextId.getValue());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
ourLog.debug("Export job {} exported resources {}", theInstanceId, foundIds);
|
||||
|
||||
for (String containedString : theContainedList) {
|
||||
assertThat("export has expected ids", foundIds, hasItem(containedString));
|
||||
}
|
||||
for (String excludedString : theExcludedList) {
|
||||
assertThat("export doesn't have expected ids", foundIds, not(hasItem(excludedString)));
|
||||
}
|
||||
}
|
||||
|
||||
private String startJob(BulkDataExportOptions theOptions) {
|
||||
Batch2JobStartResponse startResponse = myJobRunner.startNewJob(BulkExportUtils.createBulkExportJobParametersFromExportOptions(theOptions));
|
||||
assertNotNull(startResponse);
|
||||
return startResponse.getJobId();
|
||||
}
|
||||
|
||||
}
|
|
@@ -53,6 +53,7 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Stream;
 
+import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsInlineTest.createSearchParameterForInlineSecurity;
 import static org.apache.commons.lang3.StringUtils.isNotBlank;
 import static org.awaitility.Awaitility.await;
 import static org.hamcrest.MatcherAssert.assertThat;
@@ -73,7 +74,8 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
 
 	@AfterEach
 	void afterEach() {
-		myDaoConfig.setIndexMissingFields(DaoConfig.IndexEnabledEnum.DISABLED);
+		myDaoConfig.setIndexMissingFields(new DaoConfig().getIndexMissingFields());
+		myDaoConfig.setTagStorageMode(new DaoConfig().getTagStorageMode());
 	}
 
 	@Test
@ -108,6 +110,45 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
|
|||
verifyBulkExportResults(options, Collections.singletonList("Patient/PF"), Collections.singletonList("Patient/PM"));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testGroupBulkExportWithTypeFilter_OnTags_InlineTagMode() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
mySearchParameterDao.update(createSearchParameterForInlineSecurity(), mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
// Create some resources
|
||||
Patient patient = new Patient();
|
||||
patient.setId("PF");
|
||||
patient.getMeta().addSecurity("http://security", "val0", null);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("PM");
|
||||
patient.getMeta().addSecurity("http://security", "val1", null);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
|
||||
Group group = new Group();
|
||||
group.setId("Group/G");
|
||||
group.setActive(true);
|
||||
group.addMember().getEntity().setReference("Patient/PF");
|
||||
group.addMember().getEntity().setReference("Patient/PM");
|
||||
myClient.update().resource(group).execute();
|
||||
|
||||
// set the export options
|
||||
BulkDataExportOptions options = new BulkDataExportOptions();
|
||||
options.setResourceTypes(Sets.newHashSet("Patient"));
|
||||
options.setGroupId(new IdType("Group", "G"));
|
||||
options.setFilters(Sets.newHashSet("Patient?_security=http://security|val1"));
|
||||
options.setExportStyle(BulkDataExportOptions.ExportStyle.GROUP);
|
||||
options.setOutputFormat(Constants.CT_FHIR_NDJSON);
|
||||
verifyBulkExportResults(options, Collections.singletonList("Patient/PM"), Collections.singletonList("Patient/PF"));
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Test
|
||||
@Disabled("disabled to make the rel_6_4 release pipeline pass")
|
||||
public void testGroupBulkExportNotInGroup_DoesNotShowUp() {
|
||||
|
@@ -714,10 +755,10 @@ public class BulkDataExportIT extends BaseResourceProviderR4Test {
 		}
 
 		for (String containedString : theContainedList) {
-			assertThat(foundIds, hasItem(containedString));
+			assertThat("Didn't find expected ID " + containedString + " in IDS: " + foundIds, foundIds, hasItem(containedString));
 		}
 		for (String excludedString : theExcludedList) {
-			assertThat(foundIds, not(hasItem(excludedString)));
+			assertThat("Didn't want unexpected ID " + excludedString + " in IDS: " + foundIds, foundIds, not(hasItem(excludedString)));
 		}
 	}
 
@ -1,21 +1,30 @@
|
|||
package ca.uhn.fhir.jpa.bulk;
|
||||
|
||||
import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
|
||||
import ca.uhn.fhir.jpa.api.svc.IBatch2JobRunner;
|
||||
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
|
||||
import ca.uhn.fhir.jpa.bulk.export.model.BulkExportJobStatusEnum;
|
||||
import ca.uhn.fhir.jpa.bulk.export.model.BulkExportResponseJson;
|
||||
import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
|
||||
import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
|
||||
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
|
||||
import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
|
||||
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
|
||||
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
|
||||
import ca.uhn.fhir.jpa.test.Batch2JobHelper;
|
||||
import ca.uhn.fhir.jpa.util.BulkExportUtils;
|
||||
import ca.uhn.fhir.parser.IParser;
|
||||
import ca.uhn.fhir.rest.api.Constants;
|
||||
import ca.uhn.fhir.rest.api.server.RequestDetails;
|
||||
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
|
||||
import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
|
||||
import ca.uhn.fhir.util.BundleBuilder;
|
||||
import ca.uhn.fhir.util.JsonUtil;
|
||||
import ca.uhn.fhir.util.SearchParameterUtil;
|
||||
import com.google.common.collect.Sets;
|
||||
|
@ -33,6 +42,7 @@ import org.hl7.fhir.r4.model.Encounter;
|
|||
import org.hl7.fhir.r4.model.Enumerations;
|
||||
import org.hl7.fhir.r4.model.Group;
|
||||
import org.hl7.fhir.r4.model.IdType;
|
||||
import org.hl7.fhir.r4.model.InstantType;
|
||||
import org.hl7.fhir.r4.model.Observation;
|
||||
import org.hl7.fhir.r4.model.Patient;
|
||||
import org.hl7.fhir.r4.model.Reference;
|
||||
|
@ -51,6 +61,7 @@ import java.io.IOException;
|
|||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -84,6 +95,8 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
|
||||
@Autowired
|
||||
private IJobPersistence myJobPersistence;
|
||||
@Autowired
|
||||
private IJobMaintenanceService myJobMaintenanceService;
|
||||
|
||||
|
||||
@Nested
|
||||
|
@ -366,6 +379,8 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
options.setExportStyle(BulkDataExportOptions.ExportStyle.SYSTEM);
|
||||
options.setOutputFormat(Constants.CT_FHIR_NDJSON);
|
||||
|
||||
myCaptureQueriesListener.clear();
|
||||
|
||||
Batch2JobStartResponse startResponse = myJobRunner.startNewJob(BulkExportUtils.createBulkExportJobParametersFromExportOptions(options));
|
||||
|
||||
assertNotNull(startResponse);
|
||||
|
@ -375,6 +390,23 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
// Run a scheduled pass to build the export
|
||||
myBatch2JobHelper.awaitJobCompletion(startResponse.getJobId());
|
||||
|
||||
String queries = myCaptureQueriesListener
|
||||
.getUpdateQueries()
|
||||
.stream()
|
||||
.filter(t->t.getSql(false, false).toUpperCase().contains(" BT2_JOB_INSTANCE "))
|
||||
.map(t->new InstantType(new Date(t.getQueryTimestamp())) + " - " + t.getSql(true, false))
|
||||
.collect(Collectors.joining("\n * "));
|
||||
ourLog.info("Update queries:\n * " + queries);
|
||||
|
||||
runInTransaction(()->{
|
||||
String entities = myJobInstanceRepository
|
||||
.findAll()
|
||||
.stream()
|
||||
.map(t->t.toString())
|
||||
.collect(Collectors.joining("\n * "));
|
||||
ourLog.info("Entities:\n * " + entities);
|
||||
});
|
||||
|
||||
final Optional<JobInstance> optJobInstance = myJobPersistence.fetchInstance(jobId);
|
||||
|
||||
assertNotNull(optJobInstance);
|
||||
|
@ -501,25 +533,28 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
@Disabled("temporary for rel_6_4")
|
||||
public void testVeryLargeGroup() {
|
||||
|
||||
BundleBuilder bb = new BundleBuilder(myFhirContext);
|
||||
|
||||
Group group = new Group();
|
||||
group.setId("Group/G");
|
||||
group.setActive(true);
|
||||
bb.addTransactionUpdateEntry(group);
|
||||
|
||||
for (int i = 0; i < 600; i++) {
|
||||
Patient patient = new Patient();
|
||||
patient.setId("PING-" + i);
|
||||
patient.setGender(Enumerations.AdministrativeGender.FEMALE);
|
||||
patient.setActive(true);
|
||||
myClient.update().resource(patient).execute();
|
||||
bb.addTransactionUpdateEntry(patient);
|
||||
group.addMember().getEntity().setReference("Patient/PING-" + i);
|
||||
|
||||
Observation obs = new Observation();
|
||||
obs.setId("obs-" + i);
|
||||
obs.setSubject(new Reference("Patient/PING-" + i));
|
||||
myClient.update().resource(obs).execute();
|
||||
bb.addTransactionUpdateEntry(obs);
|
||||
}
|
||||
|
||||
myClient.update().resource(group).execute();
|
||||
myClient.transaction().withBundle(bb.getBundle()).execute();
|
||||
|
||||
HashSet<String> resourceTypes = Sets.newHashSet("Group", "Patient", "Observation");
|
||||
BulkExportJobResults bulkExportJobResults = startGroupBulkExportJobAndAwaitCompletion(resourceTypes, new HashSet<>(), "G");
|
||||
|
@ -567,7 +602,6 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
@Test
|
||||
@Disabled("failing intermittently for latest rel_6_4")
|
||||
public void testDifferentTypesDoNotUseCachedResults() {
|
||||
|
||||
Patient patient = new Patient();
|
||||
patient.setId("PING1");
|
||||
patient.setGender(Enumerations.AdministrativeGender.FEMALE);
|
||||
|
@ -601,6 +635,13 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
Map<String, List<IBaseResource>> secondMap = convertJobResultsToResources(altBulkExportResults);
|
||||
assertThat(secondMap.get("Patient"), hasSize(1));
|
||||
assertThat(secondMap.get("Coverage"), hasSize(1));
|
||||
|
||||
runInTransaction(()->{
|
||||
List<Batch2JobInstanceEntity> instances = myJobInstanceRepository.findAll();
|
||||
ourLog.info("Job instance states:\n * {}", instances.stream().map(Object::toString).collect(Collectors.joining("\n * ")));
|
||||
List<Batch2WorkChunkEntity> workChunks = myWorkChunkRepository.findAll();
|
||||
ourLog.info("Work chunks instance states:\n * {}", workChunks.stream().map(Object::toString).collect(Collectors.joining("\n * ")));
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
@ -1128,7 +1169,7 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
|
||||
myBatch2JobHelper.awaitJobCompletion(startResponse.getJobId(), 60);
|
||||
|
||||
await().atMost(300, TimeUnit.SECONDS).until(() -> myJobRunner.getJobInfo(startResponse.getJobId()).getReport() != null);
|
||||
assertNotNull(myJobRunner.getJobInfo(startResponse.getJobId()).getReport());
|
||||
|
||||
String report = myJobRunner.getJobInfo(startResponse.getJobId()).getReport();
|
||||
BulkExportJobResults results = JsonUtil.deserialize(report, BulkExportJobResults.class);
|
||||
|
@ -1177,4 +1218,10 @@ public class BulkExportUseCaseIT extends BaseResourceProviderR4Test {
|
|||
private BulkExportJobResults startPatientBulkExportJobAndAwaitResults(HashSet<String> theTypes, HashSet<String> theFilters, String thePatientId) {
|
||||
return startBulkExportJobAndAwaitCompletion(BulkDataExportOptions.ExportStyle.PATIENT, theTypes, theFilters, thePatientId);
|
||||
}
|
||||
|
||||
@Autowired
|
||||
private IBatch2JobInstanceRepository myJobInstanceRepository;
|
||||
@Autowired
|
||||
private IBatch2WorkChunkRepository myWorkChunkRepository;
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
package ca.uhn.fhir.jpa.bulk.export;
|
||||
|
||||
import ca.uhn.fhir.batch2.api.IJobDataSink;
|
||||
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.jobs.export.ExpandResourcesStep;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.BulkExportJobParameters;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.ExpandedResourcesList;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.ResourceIdList;
|
||||
import ca.uhn.fhir.batch2.jobs.models.BatchResourceId;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunkData;
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
|
||||
import ca.uhn.fhir.rest.server.interceptor.ResponseSizeCapturingInterceptor;
|
||||
import org.hl7.fhir.r4.model.Patient;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.CsvFileSource;
|
||||
import org.junit.jupiter.params.provider.CsvSource;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Captor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
public class ExpandResourcesStepJpaTest extends BaseJpaR4Test {
|
||||
|
||||
@Autowired
|
||||
private ExpandResourcesStep myExpandResourcesStep;
|
||||
|
||||
@Mock
|
||||
private IJobDataSink<ExpandedResourcesList> mySink;
|
||||
@Captor
|
||||
private ArgumentCaptor<ExpandedResourcesList> myWorkChunkCaptor;
|
||||
|
||||
@Override
|
||||
public void afterCleanupDao() {
|
||||
super.afterCleanupDao();
|
||||
|
||||
myDaoConfig.setTagStorageMode(new DaoConfig().getTagStorageMode());
|
||||
}
|
||||
|
||||
/**
|
||||
* Make sure we load inline tags efficiently when generating bulk export
|
||||
*/
|
||||
@ParameterizedTest
|
||||
@CsvSource({"INLINE,2", "NON_VERSIONED,3", "VERSIONED,3"})
|
||||
public void testBulkExportExpandResourcesStep(DaoConfig.TagStorageModeEnum theTagStorageMode, int theExpectedSelectQueries) {
|
||||
// Setup
|
||||
|
||||
myDaoConfig.setTagStorageMode(theTagStorageMode);
|
||||
|
||||
int count = 10;
|
||||
List<Long> ids = IntStream.range(0, count)
|
||||
.boxed()
|
||||
.map(t -> {
|
||||
Patient p = new Patient();
|
||||
p.getMeta().addTag().setSystem("http://static").setCode("tag");
|
||||
p.getMeta().addTag().setSystem("http://dynamic").setCode("tag" + t);
|
||||
return myPatientDao.create(p, mySrd).getId().getIdPartAsLong();
|
||||
}).toList();
|
||||
assertEquals(count, ids.size());
|
||||
|
||||
ResourceIdList resourceList = new ResourceIdList();
|
||||
resourceList.setResourceType("Patient");
|
||||
resourceList.setIds(ids.stream().map(t->new BatchResourceId().setResourceType("Patient").setId(Long.toString(t))).toList());
|
||||
|
||||
BulkExportJobParameters params = new BulkExportJobParameters();
|
||||
JobInstance jobInstance = new JobInstance();
|
||||
String chunkId = "ABC";
|
||||
|
||||
StepExecutionDetails<BulkExportJobParameters, ResourceIdList> details = new StepExecutionDetails<>(params, resourceList, jobInstance, chunkId);
|
||||
|
||||
// Test
|
||||
|
||||
myCaptureQueriesListener.clear();
|
||||
myExpandResourcesStep.run(details, mySink);
|
||||
|
||||
// Verify
|
||||
|
||||
verify(mySink, times(1)).accept(myWorkChunkCaptor.capture());
|
||||
ExpandedResourcesList expandedResourceList = myWorkChunkCaptor.getValue();
|
||||
assertEquals(10, expandedResourceList.getStringifiedResources().size());
|
||||
assertThat(expandedResourceList.getStringifiedResources().get(0), containsString("{\"system\":\"http://static\",\"code\":\"tag\"}"));
|
||||
assertThat(expandedResourceList.getStringifiedResources().get(0), containsString("{\"system\":\"http://dynamic\",\"code\":\"tag0\"}"));
|
||||
assertThat(expandedResourceList.getStringifiedResources().get(1), containsString("{\"system\":\"http://static\",\"code\":\"tag\"}"));
|
||||
assertThat(expandedResourceList.getStringifiedResources().get(1), containsString("{\"system\":\"http://dynamic\",\"code\":\"tag1\"}"));
|
||||
|
||||
// Verify query counts
|
||||
assertEquals(theExpectedSelectQueries, myCaptureQueriesListener.countSelectQueries());
|
||||
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
|
||||
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
|
||||
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
|
||||
assertEquals(2, myCaptureQueriesListener.countCommits());
|
||||
assertEquals(0, myCaptureQueriesListener.countRollbacks());
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
|
@ -1,5 +1,9 @@
|
|||
package ca.uhn.fhir.jpa.dao.r4;
|
||||
|
||||
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.jobs.export.ExpandResourcesStep;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.BulkExportJobParameters;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.ResourceIdList;
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
|
||||
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
|
||||
|
@ -17,8 +21,13 @@ import org.junit.jupiter.api.BeforeEach;
|
|||
import org.junit.jupiter.api.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
@@ -109,7 +118,7 @@ public class FhirResourceDaoR4SearchSqlTest extends BaseJpaR4Test {
 		boolean reindexParamCache = myDaoConfig.isMarkResourcesForReindexingUponSearchParameterChange();
 		myDaoConfig.setMarkResourcesForReindexingUponSearchParameterChange(false);
 
-		SearchParameter searchParameter = FhirResourceDaoR4TagsTest.createSearchParamForInlineResourceProfile();
+		SearchParameter searchParameter = FhirResourceDaoR4TagsInlineTest.createSearchParameterForInlineProfile();
 		ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
 		mySearchParameterDao.update(searchParameter, mySrd);
 		mySearchParamRegistry.forceRefresh();
@ -0,0 +1,236 @@
|
|||
package ca.uhn.fhir.jpa.dao.r4;
|
||||
|
||||
import ca.uhn.fhir.context.FhirContext;
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;
|
||||
import ca.uhn.fhir.rest.gclient.TokenClientParam;
|
||||
import org.hl7.fhir.r4.model.Bundle;
|
||||
import org.hl7.fhir.r4.model.Enumerations;
|
||||
import org.hl7.fhir.r4.model.IdType;
|
||||
import org.hl7.fhir.r4.model.Patient;
|
||||
import org.hl7.fhir.r4.model.SearchParameter;
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
|
||||
import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toProfiles;
|
||||
import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toSecurityLabels;
|
||||
import static ca.uhn.fhir.jpa.dao.r4.FhirResourceDaoR4TagsTest.toTags;
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
@SuppressWarnings({"Duplicates"})
|
||||
public class FhirResourceDaoR4TagsInlineTest extends BaseResourceProviderR4Test {
|
||||
|
||||
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(FhirResourceDaoR4TagsInlineTest.class);
|
||||
|
||||
@Override
|
||||
@AfterEach
|
||||
public final void after() throws Exception {
|
||||
super.after();
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.DEFAULT_TAG_STORAGE_MODE);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testInlineTags_StoreAndRetrieve() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
// Store a first version
|
||||
Patient patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
runInTransaction(() -> {
|
||||
assertEquals(0, myResourceTagDao.count());
|
||||
assertEquals(0, myResourceHistoryTagDao.count());
|
||||
assertEquals(0, myTagDefinitionDao.count());
|
||||
});
|
||||
|
||||
// Read it back
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
|
||||
// Store a second version
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile2");
|
||||
patient.getMeta().addTag("http://tag2", "vtag2", "dtag2");
|
||||
patient.getMeta().addSecurity("http://sec2", "vsec2", "dsec2");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
runInTransaction(() -> {
|
||||
assertEquals(0, myResourceTagDao.count());
|
||||
assertEquals(0, myResourceHistoryTagDao.count());
|
||||
assertEquals(0, myTagDefinitionDao.count());
|
||||
});
|
||||
|
||||
// First version should have only the initial tags
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
|
||||
// Second version should have the new set of tags
|
||||
// TODO: We could copy these forward like we do for non-inline mode. Perhaps in the future.
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/2"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile2"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), containsInAnyOrder("http://tag2|vtag2|dtag2"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), containsInAnyOrder("http://sec2|vsec2|dsec2"));
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Tag() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = createSearchParameterForInlineTag();
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_tag").exactly().systemAndCode("http://tag1", "vtag1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Profile() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = createSearchParameterForInlineProfile();
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_profile").exactly().code("http://profile1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Security() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = createSearchParameterForInlineSecurity();
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_security").exactly().systemAndCode("http://sec1", "vsec1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
private void validatePatientSearchResultsForInlineTags(Bundle outcome) {
|
||||
Patient patient;
|
||||
patient = (Patient) outcome.getEntry().get(0).getResource();
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
patient = (Patient) outcome.getEntry().get(1).getResource();
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
}
|
||||
|
||||
private void createPatientsForInlineSearchTests() {
|
||||
Patient patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/B");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/NO");
|
||||
patient.getMeta().addProfile("http://profile99");
|
||||
patient.getMeta().addTag("http://tag99", "vtag99", "dtag99");
|
||||
patient.getMeta().addSecurity("http://sec99", "vsec99", "dsec99");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public static SearchParameter createSearchParameterForInlineTag() {
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-tag");
|
||||
for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.TOKEN);
|
||||
searchParameter.setCode("_tag");
|
||||
searchParameter.setName("Tag");
|
||||
searchParameter.setExpression("meta.tag");
|
||||
return searchParameter;
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public static SearchParameter createSearchParameterForInlineSecurity() {
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-security");
|
||||
for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.TOKEN);
|
||||
searchParameter.setCode("_security");
|
||||
searchParameter.setName("Security");
|
||||
searchParameter.setExpression("meta.security");
|
||||
return searchParameter;
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public static SearchParameter createSearchParameterForInlineProfile() {
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-profile");
|
||||
for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().toList()) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.URI);
|
||||
searchParameter.setCode("_profile");
|
||||
searchParameter.setName("Profile");
|
||||
searchParameter.setExpression("meta.profile");
|
||||
return searchParameter;
|
||||
}
|
||||
|
||||
}
|
|
@ -351,82 +351,6 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
|
|||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testInlineTags_StoreAndRetrieve() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
// Store a first version
|
||||
Patient patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
runInTransaction(() -> {
|
||||
assertEquals(0, myResourceTagDao.count());
|
||||
assertEquals(0, myResourceHistoryTagDao.count());
|
||||
assertEquals(0, myTagDefinitionDao.count());
|
||||
});
|
||||
|
||||
// Read it back
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
|
||||
// Store a second version
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile2");
|
||||
patient.getMeta().addTag("http://tag2", "vtag2", "dtag2");
|
||||
patient.getMeta().addSecurity("http://sec2", "vsec2", "dsec2");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
runInTransaction(() -> {
|
||||
assertEquals(0, myResourceTagDao.count());
|
||||
assertEquals(0, myResourceHistoryTagDao.count());
|
||||
assertEquals(0, myTagDefinitionDao.count());
|
||||
});
|
||||
|
||||
// First version should have only the initial tags
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/1"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
|
||||
// Second version should have the new set of tags
|
||||
// TODO: We could copy these forward like we do for non-inline mode. Perhaps in the future.
|
||||
patient = myPatientDao.read(new IdType("Patient/A/_history/2"), mySrd);
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile2"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), containsInAnyOrder("http://tag2|vtag2|dtag2"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), containsInAnyOrder("http://sec2|vsec2|dsec2"));
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Tag() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = createResourceTagSearchParameter();
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_tag").exactly().systemAndCode("http://tag1", "vtag1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMetaDelete_TagStorageModeNonVersioned_ShouldShowRemainingTagsInGetAllResources() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.NON_VERSIONED);
|
||||
|
@ -472,91 +396,7 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
|
|||
assertEquals(1, patient.getMeta().getTag().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Profile() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = createSearchParamForInlineResourceProfile();
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_profile").exactly().code("http://profile1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInlineTags_Search_Security() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.INLINE);
|
||||
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-security");
|
||||
for (String next : myFhirContext.getResourceTypes().stream().sorted().collect(Collectors.toList())) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.TOKEN);
|
||||
searchParameter.setCode("_security");
|
||||
searchParameter.setName("Security");
|
||||
searchParameter.setExpression("meta.security");
|
||||
ourLog.debug("SearchParam:\n{}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(searchParameter));
|
||||
mySearchParameterDao.update(searchParameter, mySrd);
|
||||
mySearchParamRegistry.forceRefresh();
|
||||
|
||||
createPatientsForInlineSearchTests();
|
||||
|
||||
logAllTokenIndexes();
|
||||
|
||||
// Perform a search
|
||||
Bundle outcome = myClient.search().forResource("Patient").where(new TokenClientParam("_security").exactly().systemAndCode("http://sec1", "vsec1")).returnBundle(Bundle.class).execute();
|
||||
assertThat(toUnqualifiedVersionlessIdValues(outcome), containsInAnyOrder("Patient/A", "Patient/B"));
|
||||
|
||||
validatePatientSearchResultsForInlineTags(outcome);
|
||||
}
|
||||
|
||||
private void validatePatientSearchResultsForInlineTags(Bundle outcome) {
|
||||
Patient patient;
|
||||
patient = (Patient) outcome.getEntry().get(0).getResource();
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
patient = (Patient) outcome.getEntry().get(1).getResource();
|
||||
assertThat(toProfiles(patient).toString(), toProfiles(patient), contains("http://profile1"));
|
||||
assertThat(toTags(patient).toString(), toTags(patient), contains("http://tag1|vtag1|dtag1"));
|
||||
assertThat(toSecurityLabels(patient).toString(), toSecurityLabels(patient), contains("http://sec1|vsec1|dsec1"));
|
||||
}
|
||||
|
||||
private void createPatientsForInlineSearchTests() {
|
||||
Patient patient = new Patient();
|
||||
patient.setId("Patient/A");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/B");
|
||||
patient.getMeta().addProfile("http://profile1");
|
||||
patient.getMeta().addTag("http://tag1", "vtag1", "dtag1");
|
||||
patient.getMeta().addSecurity("http://sec1", "vsec1", "dsec1");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
|
||||
patient = new Patient();
|
||||
patient.setId("Patient/NO");
|
||||
patient.getMeta().addProfile("http://profile99");
|
||||
patient.getMeta().addTag("http://tag99", "vtag99", "dtag99");
|
||||
patient.getMeta().addSecurity("http://sec99", "vsec99", "dsec99");
|
||||
patient.setActive(true);
|
||||
myPatientDao.update(patient, mySrd);
|
||||
}
|
||||
|
||||
private void initializeNonVersioned() {
|
||||
myDaoConfig.setTagStorageMode(DaoConfig.TagStorageModeEnum.NON_VERSIONED);
|
||||
|
@ -595,63 +435,33 @@ public class FhirResourceDaoR4TagsTest extends BaseResourceProviderR4Test {
|
|||
}
|
||||
|
||||
@Nonnull
|
||||
private List<String> toTags(Patient patient) {
|
||||
static List<String> toTags(Patient patient) {
|
||||
return toTags(patient.getMeta());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private List<String> toSecurityLabels(Patient patient) {
|
||||
static List<String> toSecurityLabels(Patient patient) {
|
||||
return toSecurityLabels(patient.getMeta());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private List<String> toProfiles(Patient patient) {
|
||||
static List<String> toProfiles(Patient patient) {
|
||||
return toProfiles(patient.getMeta());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private static List<String> toTags(Meta meta) {
|
||||
static List<String> toTags(Meta meta) {
|
||||
return meta.getTag().stream().map(t -> t.getSystem() + "|" + t.getCode() + "|" + t.getDisplay()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private static List<String> toSecurityLabels(Meta meta) {
|
||||
static List<String> toSecurityLabels(Meta meta) {
|
||||
return meta.getSecurity().stream().map(t -> t.getSystem() + "|" + t.getCode() + "|" + t.getDisplay()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private static List<String> toProfiles(Meta meta) {
|
||||
static List<String> toProfiles(Meta meta) {
|
||||
return meta.getProfile().stream().map(t -> t.getValue()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public static SearchParameter createSearchParamForInlineResourceProfile() {
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-profile");
|
||||
for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().collect(Collectors.toList())) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.URI);
|
||||
searchParameter.setCode("_profile");
|
||||
searchParameter.setName("Profile");
|
||||
searchParameter.setExpression("meta.profile");
|
||||
return searchParameter;
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
public static SearchParameter createResourceTagSearchParameter() {
|
||||
SearchParameter searchParameter = new SearchParameter();
|
||||
searchParameter.setId("SearchParameter/resource-tag");
|
||||
for (String next : FhirContext.forR4Cached().getResourceTypes().stream().sorted().collect(Collectors.toList())) {
|
||||
searchParameter.addBase(next);
|
||||
}
|
||||
searchParameter.setStatus(Enumerations.PublicationStatus.ACTIVE);
|
||||
searchParameter.setType(Enumerations.SearchParamType.TOKEN);
|
||||
searchParameter.setCode("_tag");
|
||||
searchParameter.setName("Tag");
|
||||
searchParameter.setExpression("meta.tag");
|
||||
return searchParameter;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
<parent>
|
||||
<artifactId>hapi-fhir-serviceloaders</artifactId>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
<parent>
|
||||
<artifactId>hapi-fhir-serviceloaders</artifactId>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
@ -20,7 +20,7 @@
|
|||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-caching-api</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.ben-manes.caffeine</groupId>
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
<parent>
|
||||
<artifactId>hapi-fhir-serviceloaders</artifactId>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
<parent>
|
||||
<artifactId>hapi-fhir</artifactId>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-fhir-spring-boot-sample-client-apache</artifactId>
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-fhir-spring-boot-sample-client-okhttp</artifactId>
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-fhir-spring-boot-sample-server-jersey</artifactId>
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-spring-boot</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-fhir-spring-boot-samples</artifactId>
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
|
|
@ -57,13 +57,13 @@ public class BulkExportCreateReportStep implements IReductionStepWorker<BulkExpo
|
|||
results.setOriginalRequestUrl(requestUrl);
|
||||
|
||||
if (myResourceToBinaryIds != null) {
|
||||
ourLog.info("Bulk Export Report creation step");
|
||||
ourLog.info("Bulk Export Report creation step for instance: {}", theStepExecutionDetails.getInstance().getInstanceId());
|
||||
|
||||
results.setResourceTypeToBinaryIds(myResourceToBinaryIds);
|
||||
|
||||
myResourceToBinaryIds = null;
|
||||
} else {
|
||||
String msg = "Export complete, but no data to generate report.";
|
||||
String msg = "Export complete, but no data to generate report for job instance: " + theStepExecutionDetails.getInstance().getInstanceId();
|
||||
ourLog.warn(msg);
|
||||
|
||||
results.setReportMsg(msg);
|
||||
|
|
|
@ -28,14 +28,20 @@ import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
|||
import ca.uhn.fhir.batch2.jobs.export.models.BulkExportJobParameters;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.ExpandedResourcesList;
|
||||
import ca.uhn.fhir.batch2.jobs.export.models.ResourceIdList;
|
||||
import ca.uhn.fhir.batch2.jobs.models.BatchResourceId;
|
||||
import ca.uhn.fhir.context.FhirContext;
|
||||
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
|
||||
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
|
||||
import ca.uhn.fhir.jpa.api.model.PersistentIdToForcedIdMap;
|
||||
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
|
||||
import ca.uhn.fhir.jpa.bulk.export.api.IBulkExportProcessor;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.model.entity.ModelConfig;
|
||||
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
|
||||
import ca.uhn.fhir.parser.IParser;
|
||||
import ca.uhn.fhir.rest.api.server.IBundleProvider;
|
||||
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
|
||||
import ca.uhn.fhir.rest.api.server.storage.IResourcePersistentId;
|
||||
import ca.uhn.fhir.rest.param.TokenOrListParam;
|
||||
import ca.uhn.fhir.rest.server.interceptor.ResponseTerminologyTranslationSvc;
|
||||
import com.google.common.collect.ArrayListMultimap;
|
||||
import com.google.common.collect.ListMultimap;
|
||||
|
@ -47,7 +53,11 @@ import org.springframework.context.ApplicationContext;
|
|||
import javax.annotation.Nonnull;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static ca.uhn.fhir.rest.api.Constants.PARAM_ID;
|
||||
import static org.slf4j.LoggerFactory.getLogger;
|
||||
|
||||
public class ExpandResourcesStep implements IJobStepWorker<BulkExportJobParameters, ResourceIdList, ExpandedResourcesList> {
|
||||
|
@ -71,6 +81,9 @@ public class ExpandResourcesStep implements IJobStepWorker<BulkExportJobParamete
|
|||
@Autowired
|
||||
private IIdHelperService myIdHelperService;
|
||||
|
||||
@Autowired
|
||||
private IHapiTransactionService myTransactionService;
|
||||
|
||||
private volatile ResponseTerminologyTranslationSvc myResponseTerminologyTranslationSvc;
|
||||
|
||||
@Nonnull
|
||||
|
@ -102,7 +115,7 @@ public class ExpandResourcesStep implements IJobStepWorker<BulkExportJobParamete
|
|||
terminologyTranslationSvc.processResourcesForTerminologyTranslation(allResources);
|
||||
}
|
||||
|
||||
// encode them
|
||||
// encode them - Key is resource type, Value is a collection of serialized resources of that type
|
||||
ListMultimap<String, String> resources = encodeToString(allResources, jobParameters);
|
||||
|
||||
// set to datasink
|
||||
|
@ -125,12 +138,46 @@ public class ExpandResourcesStep implements IJobStepWorker<BulkExportJobParamete
|
|||
}
|
||||
|
||||
private List<IBaseResource> fetchAllResources(ResourceIdList theIds) {
|
||||
List<IBaseResource> resources = new ArrayList<>();
|
||||
ArrayListMultimap<String, String> typeToIds = ArrayListMultimap.create();
|
||||
theIds.getIds().forEach(t -> typeToIds.put(t.getResourceType(), t.getId()));
|
||||
|
||||
List<IBaseResource> resources = new ArrayList<>(theIds.getIds().size());
|
||||
|
||||
for (String resourceType : typeToIds.keySet()) {
|
||||
|
||||
IFhirResourceDao<?> dao = myDaoRegistry.getResourceDao(resourceType);
|
||||
List<String> allIds = typeToIds.get(resourceType);
|
||||
while (!allIds.isEmpty()) {
|
||||
|
||||
// Load in batches in order to avoid having too many PIDs go into a
|
||||
// single SQL statement at once
|
||||
int batchSize = Math.min(500, allIds.size());
|
||||
|
||||
Set<IResourcePersistentId> nextBatchOfPids =
|
||||
allIds
|
||||
.subList(0, batchSize)
|
||||
.stream()
|
||||
.map(t -> myIdHelperService.newPidFromStringIdAndResourceName(t, resourceType))
|
||||
.collect(Collectors.toSet());
|
||||
allIds = allIds.subList(batchSize, allIds.size());
|
||||
|
||||
PersistentIdToForcedIdMap nextBatchOfResourceIds = myTransactionService
|
||||
.withRequest(null)
|
||||
.execute(() -> myIdHelperService.translatePidsToForcedIds(nextBatchOfPids));
|
||||
|
||||
TokenOrListParam idListParam = new TokenOrListParam();
|
||||
for (IResourcePersistentId nextPid : nextBatchOfPids) {
|
||||
Optional<String> resourceId = nextBatchOfResourceIds.get(nextPid);
|
||||
idListParam.add(resourceId.orElse(nextPid.getId().toString()));
|
||||
}
|
||||
|
||||
SearchParameterMap spMap = SearchParameterMap
|
||||
.newSynchronous()
|
||||
.add(PARAM_ID, idListParam);
|
||||
IBundleProvider outcome = dao.search(spMap, new SystemRequestDetails());
|
||||
resources.addAll(outcome.getAllResources());
|
||||
}
|
||||
|
||||
for (BatchResourceId batchResourceId : theIds.getIds()) {
|
||||
IFhirResourceDao<?> dao = myDaoRegistry.getResourceDao(batchResourceId.getResourceType());
|
||||
// This should be a query, but we have PIDs, and we don't have a _pid search param. TODO GGG, figure out how to make this search by pid.
|
||||
resources.add(dao.readByPid(myIdHelperService.newPidFromStringIdAndResourceName(batchResourceId.getId(), batchResourceId.getResourceType())));
|
||||
}
|
||||
|
||||
return resources;
|
||||
|
|
|
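For reference, the batching idiom introduced by the new fetchAllResources implementation can be shown in isolation. The sketch below is a simplified, standalone illustration only: the class and method names are hypothetical and not part of this commit, but the 500-ID cap mirrors the batch size used above, where each sub-list becomes a single _id search instead of one readByPid call per resource.

import java.util.ArrayList;
import java.util.List;

public class BatchedIdLoaderSketch {

	// Upper bound on the number of IDs placed into a single search, mirroring the
	// 500-ID cap in ExpandResourcesStep to keep the generated SQL IN-clause small.
	private static final int BATCH_SIZE = 500;

	// Splits a list of resource IDs into sub-lists of at most BATCH_SIZE entries.
	// Each sub-list would then be issued as one synchronous _id search.
	static List<List<String>> partition(List<String> theIds) {
		List<List<String>> batches = new ArrayList<>();
		for (int from = 0; from < theIds.size(); from += BATCH_SIZE) {
			int to = Math.min(from + BATCH_SIZE, theIds.size());
			batches.add(new ArrayList<>(theIds.subList(from, to)));
		}
		return batches;
	}

	public static void main(String[] args) {
		List<String> ids = new ArrayList<>();
		for (int i = 0; i < 1234; i++) {
			ids.add("Patient/" + i);
		}
		// 1234 IDs -> 3 batches (500, 500, 234): 3 searches instead of 1234 individual reads
		partition(ids).forEach(batch -> System.out.println("batch of " + batch.size()));
	}
}
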
@ -12,12 +12,17 @@ import ca.uhn.fhir.batch2.model.JobInstance;
|
|||
import ca.uhn.fhir.context.FhirContext;
|
||||
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
|
||||
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
|
||||
import ca.uhn.fhir.jpa.api.model.PersistentIdToForcedIdMap;
|
||||
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
|
||||
import ca.uhn.fhir.jpa.bulk.export.api.IBulkExportProcessor;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.model.dao.JpaPid;
|
||||
import ca.uhn.fhir.jpa.model.entity.ModelConfig;
|
||||
import ca.uhn.fhir.rest.api.server.bulk.BulkDataExportOptions;
|
||||
import ca.uhn.fhir.rest.api.server.storage.BaseResourcePersistentId;
|
||||
import ca.uhn.fhir.rest.api.server.storage.IResourcePersistentId;
|
||||
import ca.uhn.fhir.rest.server.SimpleBundleProvider;
|
||||
import ca.uhn.fhir.rest.server.interceptor.ResponseTerminologyTranslationSvc;
|
||||
import org.hl7.fhir.instance.model.api.IBaseResource;
|
||||
import org.hl7.fhir.r4.model.Patient;
|
||||
|
@ -32,6 +37,10 @@ import org.mockito.junit.jupiter.MockitoExtension;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
|
@ -62,6 +71,9 @@ public class ExpandResourcesStepTest {
|
|||
@Spy
|
||||
private ModelConfig myModelConfig = new ModelConfig();
|
||||
|
||||
@Spy
|
||||
private IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();
|
||||
|
||||
@InjectMocks
|
||||
private ExpandResourcesStep mySecondStep;
|
||||
|
||||
|
@ -122,9 +134,17 @@ public class ExpandResourcesStepTest {
|
|||
createParameters(),
|
||||
instance
|
||||
);
|
||||
ArrayList<IBaseResource> clone = new ArrayList<>(resources);
|
||||
when(patientDao.readByPid(any(BaseResourcePersistentId.class))).thenAnswer(i -> clone.remove(0));
|
||||
when(patientDao.search(any(), any())).thenReturn(new SimpleBundleProvider(resources));
|
||||
when(myIdHelperService.newPidFromStringIdAndResourceName(anyString(), anyString())).thenReturn(JpaPid.fromId(1L));
|
||||
when(myIdHelperService.translatePidsToForcedIds(any())).thenAnswer(t->{
|
||||
Set<IResourcePersistentId<?>> inputSet = t.getArgument(0, Set.class);
|
||||
Map<IResourcePersistentId<?>, Optional<String>> map = new HashMap<>();
|
||||
for (var next : inputSet) {
|
||||
map.put(next, Optional.empty());
|
||||
}
|
||||
return new PersistentIdToForcedIdMap<>(map);
|
||||
});
|
||||
|
||||
// test
|
||||
RunOutcome outcome = mySecondStep.run(input, sink);
|
||||
|
||||
|
|
|
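The stubbing above answers translatePidsToForcedIds by rebuilding a map from whatever set of PIDs the step passes in, so the test keeps working regardless of batch size. A minimal, self-contained Mockito sketch of that "answer derived from the argument" pattern follows; the IdTranslator interface is a stand-in for illustration and is not a HAPI FHIR type.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

public class AnswerFromArgumentSketch {

	// Hypothetical collaborator standing in for IIdHelperService
	interface IdTranslator {
		Map<Long, Optional<String>> translate(Set<Long> thePids);
	}

	public static void main(String[] args) {
		IdTranslator translator = mock(IdTranslator.class);

		// Build the answer from the actual argument so any number of PIDs is handled
		when(translator.translate(any())).thenAnswer(invocation -> {
			Set<Long> pids = invocation.getArgument(0);
			Map<Long, Optional<String>> map = new HashMap<>();
			for (Long pid : pids) {
				map.put(pid, Optional.empty()); // no forced ID known for this PID
			}
			return map;
		});

		System.out.println(translator.translate(Set.of(1L, 2L, 3L)).size()); // prints 3
	}
}
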
@ -6,7 +6,7 @@
|
|||
<parent>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-deployable-pom</artifactId>
|
||||
<version>6.4.2</version>
|
||||
<version>6.4.2-SNAPSHOT</version>
|
||||
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
|
|
|
@ -20,7 +20,6 @@ package ca.uhn.fhir.batch2.api;
|
|||
* #L%
|
||||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.model.JobDefinition;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
|
||||
import java.util.Date;
|
||||
|
@ -56,8 +55,6 @@ public interface IJobInstance {
|
|||
|
||||
String getErrorMessage();
|
||||
|
||||
JobDefinition<?> getJobDefinition();
|
||||
|
||||
boolean isCancelled();
|
||||
|
||||
String getReport();
|
||||
|
|
|
@ -155,7 +155,7 @@ public interface IJobPersistence {
|
|||
* @param theChunkId The chunk ID
|
||||
* @param theRecordsProcessed The number of records completed during chunk processing
|
||||
*/
|
||||
void markWorkChunkAsCompletedAndClearData(String theChunkId, int theRecordsProcessed);
|
||||
void markWorkChunkAsCompletedAndClearData(String theInstanceId, String theChunkId, int theRecordsProcessed);
|
||||
|
||||
/**
|
||||
* Marks all work chunks with the provided status and erases the data
|
||||
|
@ -236,7 +236,7 @@ public interface IJobPersistence {
|
|||
*
|
||||
* @param theInstanceId The instance ID
|
||||
*/
|
||||
void deleteChunks(String theInstanceId);
|
||||
void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId);
|
||||
|
||||
/**
|
||||
* Marks an instance as being complete
|
||||
|
@ -257,4 +257,6 @@ public interface IJobPersistence {
|
|||
JobOperationResultJson cancelInstance(String theInstanceId);
|
||||
|
||||
List<String> fetchallchunkidsforstepWithStatus(String theInstanceId, String theStepId, StatusEnum theStatusEnum);
|
||||
|
||||
void updateInstanceUpdateTime(String theInstanceId);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
package ca.uhn.fhir.batch2.api;
|
||||
|
||||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR JPA Server - Batch2 Task Processor
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2023 Smile CDR, Inc.
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
|
||||
public interface IReductionStepExecutorService {
|
||||
void triggerReductionStep(String theInstanceId, JobWorkCursor<?, ?, ?> theJobWorkCursor);
|
||||
|
||||
void reducerPass();
|
||||
}
|
|
@ -23,13 +23,16 @@ package ca.uhn.fhir.batch2.config;
|
|||
import ca.uhn.fhir.batch2.api.IJobCoordinator;
|
||||
import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
|
||||
import ca.uhn.fhir.batch2.channel.BatchJobSender;
|
||||
import ca.uhn.fhir.batch2.coordinator.JobCoordinatorImpl;
|
||||
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
|
||||
import ca.uhn.fhir.batch2.coordinator.ReductionStepExecutorServiceImpl;
|
||||
import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
|
||||
import ca.uhn.fhir.batch2.maintenance.JobMaintenanceServiceImpl;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
|
||||
import ca.uhn.fhir.jpa.api.config.DaoConfig;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.model.sched.ISchedulerService;
|
||||
import ca.uhn.fhir.jpa.subscription.channel.api.ChannelConsumerSettings;
|
||||
import ca.uhn.fhir.jpa.subscription.channel.api.ChannelProducerSettings;
|
||||
|
@ -39,7 +42,6 @@ import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
|
|||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
|
||||
@Configuration
|
||||
public abstract class BaseBatch2Config {
|
||||
|
@ -57,8 +59,8 @@ public abstract class BaseBatch2Config {
|
|||
}
|
||||
|
||||
@Bean
|
||||
public WorkChunkProcessor jobStepExecutorService(BatchJobSender theBatchJobSender, PlatformTransactionManager theTransactionManager) {
|
||||
return new WorkChunkProcessor(myPersistence, theBatchJobSender, theTransactionManager);
|
||||
public WorkChunkProcessor jobStepExecutorService(BatchJobSender theBatchJobSender, IHapiTransactionService theTransactionService) {
|
||||
return new WorkChunkProcessor(myPersistence, theBatchJobSender, theTransactionService);
|
||||
}
|
||||
|
||||
@Bean
|
||||
|
@ -80,20 +82,28 @@ public abstract class BaseBatch2Config {
|
|||
theJobMaintenanceService);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public IReductionStepExecutorService reductionStepExecutorService(IJobPersistence theJobPersistence,
|
||||
IHapiTransactionService theTransactionService,
|
||||
JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
return new ReductionStepExecutorServiceImpl(theJobPersistence, theTransactionService, theJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public IJobMaintenanceService batch2JobMaintenanceService(ISchedulerService theSchedulerService,
|
||||
JobDefinitionRegistry theJobDefinitionRegistry,
|
||||
DaoConfig theDaoConfig,
|
||||
BatchJobSender theBatchJobSender,
|
||||
WorkChunkProcessor theExecutor
|
||||
WorkChunkProcessor theExecutor,
|
||||
IReductionStepExecutorService theReductionStepExecutorService
|
||||
) {
|
||||
return new JobMaintenanceServiceImpl(theSchedulerService,
|
||||
myPersistence,
|
||||
theDaoConfig,
|
||||
theJobDefinitionRegistry,
|
||||
theBatchJobSender,
|
||||
theExecutor
|
||||
);
|
||||
theExecutor,
|
||||
theReductionStepExecutorService);
|
||||
}
|
||||
|
||||
@Bean
|
||||
|
|
|
@ -33,11 +33,11 @@ import ca.uhn.fhir.batch2.model.JobWorkNotification;
|
|||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.models.JobInstanceFetchRequest;
|
||||
import ca.uhn.fhir.i18n.Msg;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
|
||||
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
|
||||
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
|
||||
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.apache.commons.lang3.Validate;
|
||||
import org.slf4j.Logger;
|
||||
import org.springframework.data.domain.Page;
|
||||
|
|
|
@ -169,8 +169,9 @@ public class JobDefinitionRegistry {
|
|||
return getJobDefinition(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
|
||||
}
|
||||
|
||||
public JobDefinition<?> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
|
||||
return getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
|
||||
@SuppressWarnings("unchecked")
|
||||
public <PT extends IModelJson> JobDefinition<PT> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
|
||||
return (JobDefinition<PT>) getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
|
||||
}
|
||||
|
||||
public Collection<Integer> getJobDefinitionVersions(String theDefinitionId) {
|
||||
|
|
|
@ -55,7 +55,9 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
|
|||
@Nonnull JobInstance theInstance,
|
||||
WorkChunk theWorkChunk,
|
||||
@Nonnull JobWorkCursor<PT, IT, OT> theCursor,
|
||||
@Nonnull WorkChunkProcessor theExecutor, IJobMaintenanceService theJobMaintenanceService) {
|
||||
@Nonnull WorkChunkProcessor theExecutor,
|
||||
@Nonnull IJobMaintenanceService theJobMaintenanceService,
|
||||
@Nonnull JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myBatchJobSender = theBatchJobSender;
|
||||
myDefinition = theCursor.jobDefinition;
|
||||
|
@ -65,11 +67,11 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
|
|||
myCursor = theCursor;
|
||||
myJobExecutorSvc = theExecutor;
|
||||
myJobMaintenanceService = theJobMaintenanceService;
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(myJobPersistence);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(myJobPersistence, theJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
void executeStep() {
|
||||
public void executeStep() {
|
||||
JobStepExecutorOutput<PT, IT, OT> stepExecutorOutput = myJobExecutorSvc.doExecution(
|
||||
myCursor,
|
||||
myInstance,
|
||||
|
|
|
@ -35,18 +35,21 @@ public class JobStepExecutorFactory {
|
|||
private final BatchJobSender myBatchJobSender;
|
||||
private final WorkChunkProcessor myJobStepExecutorSvc;
|
||||
private final IJobMaintenanceService myJobMaintenanceService;
|
||||
private final JobDefinitionRegistry myJobDefinitionRegistry;
|
||||
|
||||
public JobStepExecutorFactory(@Nonnull IJobPersistence theJobPersistence,
|
||||
@Nonnull BatchJobSender theBatchJobSender,
|
||||
@Nonnull WorkChunkProcessor theExecutorSvc,
|
||||
@Nonnull IJobMaintenanceService theJobMaintenanceService) {
|
||||
@Nonnull IJobMaintenanceService theJobMaintenanceService,
|
||||
@Nonnull JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myBatchJobSender = theBatchJobSender;
|
||||
myJobStepExecutorSvc = theExecutorSvc;
|
||||
myJobMaintenanceService = theJobMaintenanceService;
|
||||
myJobDefinitionRegistry = theJobDefinitionRegistry;
|
||||
}
|
||||
|
||||
public <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> JobStepExecutor<PT,IT,OT> newJobStepExecutor(@Nonnull JobInstance theInstance, WorkChunk theWorkChunk, @Nonnull JobWorkCursor<PT, IT, OT> theCursor) {
|
||||
return new JobStepExecutor<>(myJobPersistence, myBatchJobSender, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService);
|
||||
return new JobStepExecutor<>(myJobPersistence, myBatchJobSender, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,14 +22,16 @@ package ca.uhn.fhir.batch2.coordinator;
|
|||
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinition;
|
||||
import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunkData;
|
||||
import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
|
||||
import ca.uhn.fhir.i18n.Msg;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.util.JsonUtil;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.util.Optional;
|
||||
|
@ -39,13 +41,15 @@ public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson,
|
|||
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
|
||||
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final JobDefinitionRegistry myJobDefinitionRegistry;
|
||||
|
||||
protected ReductionStepDataSink(String theInstanceId,
|
||||
JobWorkCursor<PT, IT, OT> theJobWorkCursor,
|
||||
JobDefinition<PT> theDefinition,
|
||||
IJobPersistence thePersistence) {
|
||||
public ReductionStepDataSink(String theInstanceId,
|
||||
JobWorkCursor<PT, IT, OT> theJobWorkCursor,
|
||||
IJobPersistence thePersistence,
|
||||
JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
super(theInstanceId, theJobWorkCursor);
|
||||
myJobPersistence = thePersistence;
|
||||
myJobDefinitionRegistry = theJobDefinitionRegistry;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -57,15 +61,27 @@ public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson,
|
|||
|
||||
if (instance.getReport() != null) {
|
||||
// last in wins - so we won't throw
|
||||
ourLog.error(
|
||||
"Report has already been set. Now it is being overwritten. Last in will win!");
|
||||
ourLog.error("Report has already been set. Now it is being overwritten. Last in will win!");
|
||||
}
|
||||
|
||||
JobChunkProgressAccumulator progressAccumulator = new JobChunkProgressAccumulator();
|
||||
JobInstanceProgressCalculator myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(myJobPersistence, progressAccumulator, myJobDefinitionRegistry);
|
||||
myJobInstanceProgressCalculator.calculateInstanceProgressAndPopulateInstance(instance);
|
||||
|
||||
OT data = theData.getData();
|
||||
String dataString = JsonUtil.serialize(data, false);
|
||||
instance.setReport(dataString);
|
||||
ourLog.debug(JsonUtil.serialize(instance));
|
||||
instance.setStatus(StatusEnum.COMPLETED);
|
||||
|
||||
ourLog.info("Finalizing job instance {} with report length {} chars", instance.getInstanceId(), dataString.length());
|
||||
ourLog.atTrace()
|
||||
.addArgument(() -> JsonUtil.serialize(instance))
|
||||
.log("New instance state: {}");
|
||||
|
||||
myJobPersistence.updateInstance(instance);
|
||||
|
||||
ourLog.info("Finalized job instance {} with report length {} chars", instance.getInstanceId(), dataString.length());
|
||||
|
||||
} else {
|
||||
String msg = "No instance found with Id " + instanceId;
|
||||
ourLog.error(msg);
|
||||
|
|
|
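The trace statement added above uses the SLF4J 2.x fluent API so the instance is only serialized when TRACE is actually enabled. A minimal standalone sketch of that lazy-argument pattern is below; the logger name and payload are illustrative, and it assumes an SLF4J 2.x API on the classpath.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyTraceLoggingSketch {

	private static final Logger ourLog = LoggerFactory.getLogger(LazyTraceLoggingSketch.class);

	public static void main(String[] args) {
		// The Supplier passed to addArgument() is only invoked if TRACE is enabled,
		// so the potentially expensive serialization is skipped otherwise.
		ourLog.atTrace()
			.addArgument(() -> expensiveSerialization())
			.log("New instance state: {}");
	}

	private static String expensiveSerialization() {
		// Stand-in for JsonUtil.serialize(instance) in the real reduction data sink
		return "{...serialized job instance...}";
	}
}
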
@ -1,177 +0,0 @@
|
|||
package ca.uhn.fhir.batch2.coordinator;
|
||||
|
||||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR JPA Server - Batch2 Task Processor
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2023 Smile CDR, Inc.
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepWorker;
|
||||
import ca.uhn.fhir.batch2.model.ChunkOutcome;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinitionStep;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.slf4j.Logger;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
import org.springframework.transaction.TransactionDefinition;
|
||||
import org.springframework.transaction.support.TransactionTemplate;
|
||||
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class ReductionStepExecutor {
|
||||
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final PlatformTransactionManager myTxManager;
|
||||
private final TransactionTemplate myTxTemplate;
|
||||
|
||||
public ReductionStepExecutor(IJobPersistence theJobPersistence, PlatformTransactionManager theTransactionManager) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myTxManager = theTransactionManager;
|
||||
myTxTemplate = new TransactionTemplate(theTransactionManager);
|
||||
myTxTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
|
||||
}
|
||||
|
||||
/**
|
||||
* Do work and construct execution details for job reduction step
|
||||
*/
|
||||
<PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> boolean executeReductionStep(
|
||||
JobInstance theInstance,
|
||||
JobDefinitionStep<PT, IT, OT> theStep,
|
||||
Class<IT> theInputType,
|
||||
PT theParameters
|
||||
) {
|
||||
IReductionStepWorker<PT, IT, OT> reductionStepWorker = (IReductionStepWorker<PT, IT, OT>) theStep.getJobStepWorker();
|
||||
|
||||
if (!myJobPersistence.markInstanceAsStatus(theInstance.getInstanceId(), StatusEnum.FINALIZE)) {
|
||||
ourLog.warn(
|
||||
"JobInstance[{}] is already in FINALIZE state. In memory status is {}. Reduction step will not rerun!"
|
||||
+ " This could be a long running reduction job resulting in the processed msg not being acknowledge,"
|
||||
+ " or the result of a failed process or server restarting.",
|
||||
theInstance.getInstanceId(),
|
||||
theInstance.getStatus().name()
|
||||
);
|
||||
return false;
|
||||
}
|
||||
theInstance.setStatus(StatusEnum.FINALIZE);
|
||||
|
||||
boolean defaultSuccessValue = true;
|
||||
ReductionStepChunkProcessingResponse response = new ReductionStepChunkProcessingResponse(defaultSuccessValue);
|
||||
|
||||
try {
|
||||
myTxTemplate.executeWithoutResult((status) -> {
|
||||
try(Stream<WorkChunk> chunkIterator2 = myJobPersistence.fetchAllWorkChunksForStepStream(theInstance.getInstanceId(), theStep.getStepId())) {
|
||||
chunkIterator2.forEach((chunk) -> {
|
||||
processChunk(chunk, theInstance, theInputType, theParameters, reductionStepWorker, response);
|
||||
});
|
||||
}
|
||||
});
|
||||
} finally {
|
||||
|
||||
if (response.hasSuccessfulChunksIds()) {
|
||||
// complete the steps without making a new work chunk
|
||||
myJobPersistence.markWorkChunksWithStatusAndWipeData(theInstance.getInstanceId(),
|
||||
response.getSuccessfulChunkIds(),
|
||||
StatusEnum.COMPLETED,
|
||||
null // error message - none
|
||||
);
|
||||
}
|
||||
|
||||
if (response.hasFailedChunkIds()) {
|
||||
// mark any failed chunks as failed for aborting
|
||||
myJobPersistence.markWorkChunksWithStatusAndWipeData(theInstance.getInstanceId(),
|
||||
response.getFailedChunksIds(),
|
||||
StatusEnum.FAILED,
|
||||
"JOB ABORTED");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// if no successful chunks, return false
|
||||
if (!response.hasSuccessfulChunksIds()) {
|
||||
response.setSuccessful(false);
|
||||
}
|
||||
|
||||
return response.isSuccessful();
|
||||
}
|
||||
|
||||
private <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson>
|
||||
void processChunk(WorkChunk theChunk,
|
||||
JobInstance theInstance,
|
||||
Class<IT> theInputType,
|
||||
PT theParameters,
|
||||
IReductionStepWorker<PT, IT, OT> theReductionStepWorker,
|
||||
ReductionStepChunkProcessingResponse theResponseObject){
|
||||
|
||||
if (!theChunk.getStatus().isIncomplete()) {
|
||||
// This should never happen since jobs with reduction are required to be gated
|
||||
ourLog.error("Unexpected chunk {} with status {} found while reducing {}. No chunks feeding into a reduction step should be complete.", theChunk.getId(), theChunk.getStatus(), theInstance);
|
||||
return;
|
||||
}
|
||||
|
||||
if (theResponseObject.hasFailedChunkIds()) {
|
||||
// we are going to fail all future chunks now
|
||||
theResponseObject.addFailedChunkId(theChunk);
|
||||
} else {
|
||||
try {
|
||||
// feed them into our reduction worker
|
||||
// this is the most likely area to throw,
|
||||
// as this is where db actions and processing is likely to happen
|
||||
ChunkExecutionDetails<PT, IT> chunkDetails = new ChunkExecutionDetails<>(theChunk.getData(theInputType), theParameters, theInstance.getInstanceId(), theChunk.getId());
|
||||
|
||||
ChunkOutcome outcome = theReductionStepWorker.consume(chunkDetails);
|
||||
|
||||
switch (outcome.getStatus()) {
|
||||
case SUCCESS:
|
||||
theResponseObject.addSuccessfulChunkId(theChunk);
|
||||
break;
|
||||
|
||||
case ABORT:
|
||||
ourLog.error("Processing of work chunk {} resulted in aborting job.", theChunk.getId());
|
||||
|
||||
// fail the entire job - including all future work chunks
|
||||
theResponseObject.addFailedChunkId(theChunk);
|
||||
theResponseObject.setSuccessful(false);
|
||||
break;
|
||||
|
||||
case FAIL:
|
||||
// non-idempotent; but failed chunks will be
|
||||
// ignored on a second run-through of the reduction step
|
||||
myJobPersistence.markWorkChunkAsFailed(theChunk.getId(),
|
||||
"Step worker failed to process work chunk " + theChunk.getId());
|
||||
theResponseObject.setSuccessful(false);
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
String msg = String.format(
|
||||
"Reduction step failed to execute chunk reduction for chunk %s with exception: %s.",
|
||||
theChunk.getId(),
|
||||
e.getMessage()
|
||||
);
|
||||
// we got a failure in a reduction
|
||||
ourLog.error(msg, e);
|
||||
theResponseObject.setSuccessful(false);
|
||||
|
||||
myJobPersistence.markWorkChunkAsFailed(theChunk.getId(), msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,338 @@
|
|||
package ca.uhn.fhir.batch2.coordinator;
|
||||
|
||||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR JPA Server - Batch2 Task Processor
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2023 Smile CDR, Inc.
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepWorker;
|
||||
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.model.ChunkOutcome;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinitionStep;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.model.sched.HapiJob;
|
||||
import ca.uhn.fhir.jpa.model.sched.IHasScheduledJobs;
|
||||
import ca.uhn.fhir.jpa.model.sched.ISchedulerService;
|
||||
import ca.uhn.fhir.jpa.model.sched.ScheduledJobDefinition;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.commons.lang3.time.DateUtils;
|
||||
import org.quartz.JobExecutionContext;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.context.event.ContextClosedEvent;
|
||||
import org.springframework.context.event.ContextRefreshedEvent;
|
||||
import org.springframework.context.event.EventListener;
|
||||
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
|
||||
import org.springframework.transaction.annotation.Propagation;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Timer;
|
||||
import java.util.TimerTask;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorService, IHasScheduledJobs {
|
||||
public static final String SCHEDULED_JOB_ID = ReductionStepExecutorScheduledJob.class.getName();
|
||||
private static final Logger ourLog = LoggerFactory.getLogger(ReductionStepExecutorServiceImpl.class);
|
||||
private final Map<String, JobWorkCursor> myInstanceIdToJobWorkCursor = Collections.synchronizedMap(new LinkedHashMap<>());
|
||||
private final ExecutorService myReducerExecutor;
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final IHapiTransactionService myTransactionService;
|
||||
private final Semaphore myCurrentlyExecuting = new Semaphore(1);
|
||||
private final AtomicReference<String> myCurrentlyFinalizingInstanceId = new AtomicReference<>();
|
||||
private Timer myHeartbeatTimer;
|
||||
private final JobDefinitionRegistry myJobDefinitionRegistry;
|
||||
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*/
|
||||
public ReductionStepExecutorServiceImpl(IJobPersistence theJobPersistence, IHapiTransactionService theTransactionService, JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myTransactionService = theTransactionService;
|
||||
myJobDefinitionRegistry = theJobDefinitionRegistry;
|
||||
|
||||
myReducerExecutor = Executors.newSingleThreadExecutor(new CustomizableThreadFactory("batch2-reducer"));
|
||||
}
|
||||
|
||||
|
||||
@EventListener(ContextRefreshedEvent.class)
|
||||
public void start() {
|
||||
myHeartbeatTimer = new Timer("batch2-reducer-heartbeat");
|
||||
myHeartbeatTimer.schedule(new HeartbeatTimerTask(), DateUtils.MILLIS_PER_MINUTE, DateUtils.MILLIS_PER_MINUTE);
|
||||
}
|
||||
|
||||
private void runHeartbeat() {
|
||||
String currentlyFinalizingInstanceId = myCurrentlyFinalizingInstanceId.get();
|
||||
if (currentlyFinalizingInstanceId != null) {
|
||||
ourLog.info("Running heartbeat for instance: {}", currentlyFinalizingInstanceId);
|
||||
executeInTransactionWithSynchronization(()->{
|
||||
myJobPersistence.updateInstanceUpdateTime(currentlyFinalizingInstanceId);
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@EventListener(ContextClosedEvent.class)
|
||||
public void shutdown() {
|
||||
myHeartbeatTimer.cancel();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void triggerReductionStep(String theInstanceId, JobWorkCursor<?, ?, ?> theJobWorkCursor) {
|
||||
myInstanceIdToJobWorkCursor.putIfAbsent(theInstanceId, theJobWorkCursor);
|
||||
if (myCurrentlyExecuting.availablePermits() > 0) {
|
||||
myReducerExecutor.submit(() -> reducerPass());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void reducerPass() {
|
||||
if (myCurrentlyExecuting.tryAcquire()) {
|
||||
try {
|
||||
|
||||
String[] instanceIds = myInstanceIdToJobWorkCursor.keySet().toArray(new String[0]);
|
||||
if (instanceIds.length > 0) {
|
||||
String instanceId = instanceIds[0];
|
||||
myCurrentlyFinalizingInstanceId.set(instanceId);
|
||||
JobWorkCursor<?, ?, ?> jobWorkCursor = myInstanceIdToJobWorkCursor.get(instanceId);
|
||||
executeReductionStep(instanceId, jobWorkCursor);
|
||||
|
||||
// If we get here, this succeeded. Purge the instance from the work queue
|
||||
myInstanceIdToJobWorkCursor.remove(instanceId);
|
||||
}
|
||||
|
||||
} finally {
|
||||
myCurrentlyFinalizingInstanceId.set(null);
|
||||
myCurrentlyExecuting.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
<PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> ReductionStepChunkProcessingResponse executeReductionStep(String theInstanceId, JobWorkCursor<PT, IT, OT> theJobWorkCursor) {
|
||||
|
||||
JobDefinitionStep<PT, IT, OT> step = theJobWorkCursor.getCurrentStep();
|
||||
|
||||
JobInstance instance = executeInTransactionWithSynchronization(() -> {
|
||||
JobInstance currentInstance = myJobPersistence.fetchInstance(theInstanceId).orElseThrow(() -> new InternalErrorException("Unknown currentInstance: " + theInstanceId));
|
||||
boolean shouldProceed = false;
|
||||
switch (currentInstance.getStatus()) {
|
||||
case IN_PROGRESS:
|
||||
case ERRORED:
|
||||
if (myJobPersistence.markInstanceAsStatus(currentInstance.getInstanceId(), StatusEnum.FINALIZE)) {
|
||||
ourLog.info("Job instance {} has been set to FINALIZE state - Beginning reducer step", currentInstance.getInstanceId());
|
||||
shouldProceed = true;
|
||||
}
|
||||
break;
|
||||
case FINALIZE:
|
||||
case COMPLETED:
|
||||
case FAILED:
|
||||
case QUEUED:
|
||||
case CANCELLED:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!shouldProceed) {
|
||||
ourLog.warn(
|
||||
"JobInstance[{}] should not be finalized at this time. In memory status is {}. Reduction step will not rerun!"
|
||||
+ " This could be a long running reduction job resulting in the processed msg not being acknowledge,"
|
||||
+ " or the result of a failed process or server restarting.",
|
||||
currentInstance.getInstanceId(),
|
||||
currentInstance.getStatus().name()
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
return currentInstance;
|
||||
});
|
||||
if (instance == null) {
|
||||
return new ReductionStepChunkProcessingResponse(false);
|
||||
}
|
||||
|
||||
PT parameters = instance.getParameters(theJobWorkCursor.getJobDefinition().getParametersType());
|
||||
IReductionStepWorker<PT, IT, OT> reductionStepWorker = (IReductionStepWorker<PT, IT, OT>) step.getJobStepWorker();
|
||||
|
||||
instance.setStatus(StatusEnum.FINALIZE);
|
||||
|
||||
boolean defaultSuccessValue = true;
|
||||
ReductionStepChunkProcessingResponse response = new ReductionStepChunkProcessingResponse(defaultSuccessValue);
|
||||
|
||||
try {
|
||||
executeInTransactionWithSynchronization(() -> {
|
||||
try (Stream<WorkChunk> chunkIterator = myJobPersistence.fetchAllWorkChunksForStepStream(instance.getInstanceId(), step.getStepId())) {
|
||||
chunkIterator.forEach((chunk) -> {
|
||||
processChunk(chunk, instance, parameters, reductionStepWorker, response, theJobWorkCursor);
|
||||
});
|
||||
}
|
||||
return null;
|
||||
});
|
||||
} finally {
|
||||
|
||||
executeInTransactionWithSynchronization(() -> {
|
||||
ourLog.info("Reduction step for instance[{}] produced {} successful and {} failed chunks", instance.getInstanceId(), response.getSuccessfulChunkIds().size(), response.getFailedChunksIds().size());
|
||||
|
||||
ReductionStepDataSink<PT, IT, OT> dataSink = new ReductionStepDataSink<>(instance.getInstanceId(), theJobWorkCursor, myJobPersistence, myJobDefinitionRegistry);
|
||||
StepExecutionDetails<PT, IT> chunkDetails = new StepExecutionDetails<>(parameters, null, instance, "REDUCTION");
|
||||
|
||||
if (response.isSuccessful()) {
|
||||
reductionStepWorker.run(chunkDetails, dataSink);
|
||||
}
|
||||
|
||||
if (response.hasSuccessfulChunksIds()) {
|
||||
// complete the steps without making a new work chunk
|
||||
myJobPersistence.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(),
|
||||
response.getSuccessfulChunkIds(),
|
||||
StatusEnum.COMPLETED,
|
||||
null // error message - none
|
||||
);
|
||||
}
|
||||
|
||||
if (response.hasFailedChunkIds()) {
|
||||
// mark any failed chunks as failed for aborting
|
||||
myJobPersistence.markWorkChunksWithStatusAndWipeData(instance.getInstanceId(),
|
||||
response.getFailedChunksIds(),
|
||||
StatusEnum.FAILED,
|
||||
"JOB ABORTED");
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
// if no successful chunks, mark the response as unsuccessful
|
||||
if (!response.hasSuccessfulChunksIds()) {
|
||||
response.setSuccessful(false);
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
private <T> T executeInTransactionWithSynchronization(Callable<T> runnable) {
|
||||
return myTransactionService
|
||||
.withRequest(null)
|
||||
.withPropagation(Propagation.REQUIRES_NEW)
|
||||
.execute(runnable);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void scheduleJobs(ISchedulerService theSchedulerService) {
|
||||
theSchedulerService.scheduleClusteredJob(10 * DateUtils.MILLIS_PER_SECOND, buildJobDefinition());
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private ScheduledJobDefinition buildJobDefinition() {
|
||||
ScheduledJobDefinition jobDefinition = new ScheduledJobDefinition();
|
||||
jobDefinition.setId(SCHEDULED_JOB_ID);
|
||||
jobDefinition.setJobClass(ReductionStepExecutorScheduledJob.class);
|
||||
return jobDefinition;
|
||||
}
|
||||
|
||||
private <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson>
|
||||
void processChunk(WorkChunk theChunk,
|
||||
JobInstance theInstance,
|
||||
PT theParameters,
|
||||
IReductionStepWorker<PT, IT, OT> theReductionStepWorker,
|
||||
ReductionStepChunkProcessingResponse theResponseObject,
|
||||
JobWorkCursor<PT, IT, OT> theJobWorkCursor) {
|
||||
|
||||
if (!theChunk.getStatus().isIncomplete()) {
|
||||
// This should never happen since jobs with reduction are required to be gated
|
||||
ourLog.error("Unexpected chunk {} with status {} found while reducing {}. No chunks feeding into a reduction step should be complete.", theChunk.getId(), theChunk.getStatus(), theInstance);
|
||||
return;
|
||||
}
|
||||
|
||||
if (theResponseObject.hasFailedChunkIds()) {
|
||||
// we are going to fail all future chunks now
|
||||
theResponseObject.addFailedChunkId(theChunk);
|
||||
} else {
|
||||
try {
|
||||
// feed them into our reduction worker
|
||||
// this is the most likely area to throw,
|
||||
// as this is where db actions and processing are likely to happen
|
||||
IT chunkData = theChunk.getData(theJobWorkCursor.getCurrentStep().getInputType());
|
||||
ChunkExecutionDetails<PT, IT> chunkDetails = new ChunkExecutionDetails<>(chunkData, theParameters, theInstance.getInstanceId(), theChunk.getId());
|
||||
|
||||
ChunkOutcome outcome = theReductionStepWorker.consume(chunkDetails);
|
||||
|
||||
switch (outcome.getStatus()) {
|
||||
case SUCCESS:
|
||||
theResponseObject.addSuccessfulChunkId(theChunk);
|
||||
break;
|
||||
|
||||
case FAILED:
|
||||
ourLog.error("Processing of work chunk {} resulted in aborting job.", theChunk.getId());
|
||||
|
||||
// fail the entire job - including all future work chunks
|
||||
theResponseObject.addFailedChunkId(theChunk);
|
||||
theResponseObject.setSuccessful(false);
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
String msg = String.format(
|
||||
"Reduction step failed to execute chunk reduction for chunk %s with exception: %s.",
|
||||
theChunk.getId(),
|
||||
e.getMessage()
|
||||
);
|
||||
// we got a failure in a reduction
|
||||
ourLog.error(msg, e);
|
||||
theResponseObject.setSuccessful(false);
|
||||
|
||||
myJobPersistence.markWorkChunkAsFailed(theChunk.getId(), msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private class HeartbeatTimerTask extends TimerTask {
|
||||
@Override
|
||||
public void run() {
|
||||
runHeartbeat();
|
||||
}
|
||||
}
|
||||
|
||||
public static class ReductionStepExecutorScheduledJob implements HapiJob {
|
||||
@Autowired
|
||||
private IReductionStepExecutorService myTarget;
|
||||
|
||||
@Override
|
||||
public void execute(JobExecutionContext theContext) {
|
||||
myTarget.reducerPass();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -108,7 +108,7 @@ public class StepExecutor {
|
|||
int recordsProcessed = outcome.getRecordsProcessed();
|
||||
int recoveredErrorCount = theDataSink.getRecoveredErrorCount();
|
||||
|
||||
myJobPersistence.markWorkChunkAsCompletedAndClearData(chunkId, recordsProcessed);
|
||||
myJobPersistence.markWorkChunkAsCompletedAndClearData(theStepExecutionDetails.getInstance().getInstanceId(), chunkId, recordsProcessed);
|
||||
if (recoveredErrorCount > 0) {
|
||||
myJobPersistence.incrementWorkChunkErrorCount(chunkId, recoveredErrorCount);
|
||||
}
|
||||
|
|
|
@ -120,8 +120,8 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
|
|||
}
|
||||
|
||||
@Override
|
||||
public synchronized void markWorkChunkAsCompletedAndClearData(String theChunkId, int theRecordsProcessed) {
|
||||
myWrap.markWorkChunkAsCompletedAndClearData(theChunkId, theRecordsProcessed);
|
||||
public synchronized void markWorkChunkAsCompletedAndClearData(String theInstanceId, String theChunkId, int theRecordsProcessed) {
|
||||
myWrap.markWorkChunkAsCompletedAndClearData(theInstanceId, theChunkId, theRecordsProcessed);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -170,8 +170,8 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
|
|||
}
|
||||
|
||||
@Override
|
||||
public synchronized void deleteChunks(String theInstanceId) {
|
||||
myWrap.deleteChunks(theInstanceId);
|
||||
public synchronized void deleteChunksAndMarkInstanceAsChunksPurged(String theInstanceId) {
|
||||
myWrap.deleteChunksAndMarkInstanceAsChunksPurged(theInstanceId);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -193,4 +193,9 @@ public class SynchronizedJobPersistenceWrapper implements IJobPersistence {
|
|||
public List<String> fetchallchunkidsforstepWithStatus(String theInstanceId, String theStepId, StatusEnum theStatusEnum) {
|
||||
return myWrap.fetchallchunkidsforstepWithStatus(theInstanceId, theStepId, theStatusEnum);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void updateInstanceUpdateTime(String theInstanceId) {
|
||||
myWrap.updateInstanceUpdateTime(theInstanceId);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -32,11 +32,9 @@ import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
|
|||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
|
||||
import ca.uhn.fhir.batch2.util.Batch2Constants;
|
||||
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.apache.commons.lang3.Validate;
|
||||
import javax.validation.constraints.NotNull;
|
||||
import org.slf4j.Logger;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHandler;
|
||||
|
@ -44,9 +42,6 @@ import org.springframework.messaging.MessagingException;
|
|||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
|
||||
/**
|
||||
* This handler receives batch work request messages and performs the batch work requested by the message
|
||||
|
@ -65,8 +60,8 @@ class WorkChannelMessageHandler implements MessageHandler {
|
|||
@Nonnull IJobMaintenanceService theJobMaintenanceService) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myJobDefinitionRegistry = theJobDefinitionRegistry;
|
||||
myJobStepExecutorFactory = new JobStepExecutorFactory(theJobPersistence, theBatchJobSender, theExecutorSvc, theJobMaintenanceService);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
|
||||
myJobStepExecutorFactory = new JobStepExecutorFactory(theJobPersistence, theBatchJobSender, theExecutorSvc, theJobMaintenanceService, theJobDefinitionRegistry);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -81,27 +76,19 @@ class WorkChannelMessageHandler implements MessageHandler {
|
|||
String chunkId = workNotification.getChunkId();
|
||||
Validate.notNull(chunkId);
|
||||
|
||||
boolean isReductionWorkNotification = Batch2Constants.REDUCTION_STEP_CHUNK_ID_PLACEHOLDER.equals(chunkId);
|
||||
|
||||
JobWorkCursor<?, ?, ?> cursor = null;
|
||||
WorkChunk workChunk = null;
|
||||
if (!isReductionWorkNotification) {
|
||||
Optional<WorkChunk> chunkOpt = myJobPersistence.fetchWorkChunkSetStartTimeAndMarkInProgress(chunkId);
|
||||
if (chunkOpt.isEmpty()) {
|
||||
ourLog.error("Unable to find chunk with ID {} - Aborting", chunkId);
|
||||
return;
|
||||
}
|
||||
workChunk = chunkOpt.get();
|
||||
ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", chunkId, workChunk.getTargetStepId(), workChunk.getStartTime());
|
||||
|
||||
cursor = buildCursorFromNotification(workNotification);
|
||||
|
||||
Validate.isTrue(workChunk.getTargetStepId().equals(cursor.getCurrentStepId()), "Chunk %s has target step %s but expected %s", chunkId, workChunk.getTargetStepId(), cursor.getCurrentStepId());
|
||||
} else {
|
||||
ourLog.debug("Processing reduction step work notification. No associated workchunks.");
|
||||
|
||||
cursor = buildCursorFromNotification(workNotification);
|
||||
Optional<WorkChunk> chunkOpt = myJobPersistence.fetchWorkChunkSetStartTimeAndMarkInProgress(chunkId);
|
||||
if (chunkOpt.isEmpty()) {
|
||||
ourLog.error("Unable to find chunk with ID {} - Aborting", chunkId);
|
||||
return;
|
||||
}
|
||||
workChunk = chunkOpt.get();
|
||||
ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", chunkId, workChunk.getTargetStepId(), workChunk.getStartTime());
|
||||
|
||||
cursor = buildCursorFromNotification(workNotification);
|
||||
|
||||
Validate.isTrue(workChunk.getTargetStepId().equals(cursor.getCurrentStepId()), "Chunk %s has target step %s but expected %s", chunkId, workChunk.getTargetStepId(), cursor.getCurrentStepId());
|
||||
|
||||
Optional<JobInstance> instanceOpt = myJobPersistence.fetchInstance(workNotification.getInstanceId());
|
||||
JobInstance instance = instanceOpt.orElseThrow(() -> new InternalErrorException("Unknown instance: " + workNotification.getInstanceId()));
|
||||
|
@ -116,27 +103,7 @@ class WorkChannelMessageHandler implements MessageHandler {
|
|||
}
|
||||
|
||||
JobStepExecutor<?,?,?> stepExecutor = myJobStepExecutorFactory.newJobStepExecutor(instance, workChunk, cursor);
|
||||
// TODO - ls
|
||||
/*
|
||||
* We should change this to actually have
|
||||
* the reduction step take in smaller sets of
|
||||
* lists of chunks from the previous steps (one
|
||||
* at a time still) and compose the
|
||||
* report gradually and in an idempotent way
|
||||
*/
|
||||
if (isReductionWorkNotification) {
|
||||
// do async due to long running process
|
||||
// we'll fire off a separate thread and let the job continue
|
||||
ScheduledExecutorService exService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
|
||||
@Override
|
||||
public Thread newThread(@NotNull Runnable r) {
|
||||
return new Thread(r, "Reduction-step-thread");
|
||||
}
|
||||
});
|
||||
exService.execute(stepExecutor::executeStep);
|
||||
} else {
|
||||
stepExecutor.executeStep();
|
||||
}
|
||||
stepExecutor.executeStep();
|
||||
}
|
||||
|
||||
private void markInProgressIfQueued(JobInstance theInstance) {
|
||||
|
|
|
@ -22,8 +22,6 @@ package ca.uhn.fhir.batch2.coordinator;
|
|||
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IJobStepWorker;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepWorker;
|
||||
import ca.uhn.fhir.batch2.api.ReductionStepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.VoidModel;
|
||||
import ca.uhn.fhir.batch2.channel.BatchJobSender;
|
||||
|
@ -32,11 +30,11 @@ import ca.uhn.fhir.batch2.model.JobDefinitionStep;
|
|||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.apache.commons.lang3.Validate;
|
||||
import org.slf4j.Logger;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import java.util.Optional;
|
||||
|
@ -44,13 +42,10 @@ import java.util.Optional;
|
|||
import static org.apache.commons.lang3.StringUtils.isBlank;
|
||||
|
||||
public class WorkChunkProcessor {
|
||||
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
|
||||
|
||||
// TODO
|
||||
/**
|
||||
* This retry only works if your channel producer supports
|
||||
* retries on message processing exceptions.
|
||||
*
|
||||
* <p>
|
||||
* What's more, we may one day want to have this configurable
|
||||
* by the caller.
|
||||
* But since this is not a feature of HAPI,
|
||||
|
@ -58,29 +53,28 @@ public class WorkChunkProcessor {
|
|||
*/
|
||||
public static final int MAX_CHUNK_ERROR_COUNT = 3;
|
||||
|
||||
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final BatchJobSender myBatchJobSender;
|
||||
private final StepExecutor myStepExecutor;
|
||||
private final ReductionStepExecutor myReductionStepExecutor;
|
||||
|
||||
public WorkChunkProcessor(IJobPersistence theJobPersistence,
|
||||
BatchJobSender theSender,
|
||||
PlatformTransactionManager theTransactionManager) {
|
||||
IHapiTransactionService theTransactionService) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myBatchJobSender = theSender;
|
||||
myStepExecutor = new StepExecutor(theJobPersistence);
|
||||
myReductionStepExecutor = new ReductionStepExecutor(theJobPersistence, theTransactionManager);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the work chunk.
|
||||
*
|
||||
* @param theCursor - work cursor
|
||||
* @param theInstance - the job instance
|
||||
* @param theCursor - work cursor
|
||||
* @param theInstance - the job instance
|
||||
* @param theWorkChunk - the work chunk (if available); can be null (for reduction step only!)
|
||||
* @param <PT> - Job parameters Type
|
||||
* @param <IT> - Step input parameters Type
|
||||
* @param <OT> - Step output parameters Type
|
||||
* @param <PT> - Job parameters Type
|
||||
* @param <IT> - Step input parameters Type
|
||||
* @param <OT> - Step output parameters Type
|
||||
* @return - JobStepExecution output. Contains the data sink and whether the execution succeeded.
|
||||
*/
|
||||
public <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> JobStepExecutorOutput<PT, IT, OT>
|
||||
|
@ -98,53 +92,37 @@ public class WorkChunkProcessor {
|
|||
IJobStepWorker<PT, IT, OT> worker = step.getJobStepWorker();
|
||||
BaseDataSink<PT, IT, OT> dataSink = getDataSink(theCursor, jobDefinition, instanceId);
|
||||
|
||||
if (step.isReductionStep()) {
|
||||
// reduction step details
|
||||
boolean success = myReductionStepExecutor.executeReductionStep(theInstance, step, inputType, parameters);
|
||||
assert !step.isReductionStep();
|
||||
|
||||
if (success) {
|
||||
// Now call the normal step executor
|
||||
// the data sink stores the report on the instance (i.e. not chunks).
|
||||
// Assume the OT (report) data is smaller than the list of all IT data
|
||||
|
||||
ReductionStepExecutionDetails<PT, IT, OT> reductionStepExecutionDetails = new ReductionStepExecutionDetails<>(parameters, theInstance);
|
||||
IReductionStepWorker<PT, IT, OT> reductionStepWorker = (IReductionStepWorker<PT, IT, OT>) step.getJobStepWorker();
|
||||
|
||||
success = myStepExecutor.executeStep(reductionStepExecutionDetails, reductionStepWorker, dataSink);
|
||||
}
|
||||
|
||||
return new JobStepExecutorOutput<>(success, dataSink);
|
||||
} else {
|
||||
// all other kinds of steps
|
||||
Validate.notNull(theWorkChunk);
|
||||
Optional<StepExecutionDetails<PT, IT>> stepExecutionDetailsOpt = getExecutionDetailsForNonReductionStep(theWorkChunk, theInstance, inputType, parameters);
|
||||
if (!stepExecutionDetailsOpt.isPresent()) {
|
||||
return new JobStepExecutorOutput<>(false, dataSink);
|
||||
}
|
||||
|
||||
StepExecutionDetails<PT, IT> stepExecutionDetails = stepExecutionDetailsOpt.get();
|
||||
|
||||
// execute the step
|
||||
boolean success = myStepExecutor.executeStep(stepExecutionDetails, worker, dataSink);
|
||||
|
||||
// return results with data sink
|
||||
return new JobStepExecutorOutput<>(success, dataSink);
|
||||
// all other kinds of steps
|
||||
Validate.notNull(theWorkChunk);
|
||||
Optional<StepExecutionDetails<PT, IT>> stepExecutionDetailsOpt = getExecutionDetailsForNonReductionStep(theWorkChunk, theInstance, inputType, parameters);
|
||||
if (!stepExecutionDetailsOpt.isPresent()) {
|
||||
return new JobStepExecutorOutput<>(false, dataSink);
|
||||
}
|
||||
|
||||
StepExecutionDetails<PT, IT> stepExecutionDetails = stepExecutionDetailsOpt.get();
|
||||
|
||||
// execute the step
|
||||
boolean success = myStepExecutor.executeStep(stepExecutionDetails, worker, dataSink);
|
||||
|
||||
// return results with data sink
|
||||
return new JobStepExecutorOutput<>(success, dataSink);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the correct datasink for the cursor/job provided.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> BaseDataSink<PT, IT, OT> getDataSink(
|
||||
protected <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> BaseDataSink<PT, IT, OT> getDataSink(
|
||||
JobWorkCursor<PT, IT, OT> theCursor,
|
||||
JobDefinition<PT> theJobDefinition,
|
||||
String theInstanceId
|
||||
) {
|
||||
BaseDataSink<PT, IT, OT> dataSink;
|
||||
if (theCursor.isReductionStep()) {
|
||||
dataSink = new ReductionStepDataSink<>(theInstanceId, theCursor, theJobDefinition, myJobPersistence);
|
||||
} else if (theCursor.isFinalStep()) {
|
||||
|
||||
assert !theCursor.isReductionStep();
|
||||
if (theCursor.isFinalStep()) {
|
||||
dataSink = (BaseDataSink<PT, IT, OT>) new FinalStepDataSink<>(theJobDefinition.getJobDefinitionId(), theInstanceId, theCursor.asFinalCursor());
|
||||
} else {
|
||||
dataSink = new JobDataSink<>(myBatchJobSender, myJobPersistence, theJobDefinition, theInstanceId, theCursor);
|
||||
|
|
|
@ -21,15 +21,17 @@ package ca.uhn.fhir.batch2.maintenance;
|
|||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
|
||||
import ca.uhn.fhir.batch2.channel.BatchJobSender;
|
||||
import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
|
||||
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinition;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkNotification;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
|
||||
import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
|
||||
import ca.uhn.fhir.batch2.util.Batch2Constants;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.util.Logs;
|
||||
import org.apache.commons.lang3.time.DateUtils;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -42,113 +44,121 @@ public class JobInstanceProcessor {
|
|||
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final BatchJobSender myBatchJobSender;
|
||||
|
||||
private final JobInstance myInstance;
|
||||
private final JobChunkProgressAccumulator myProgressAccumulator;
|
||||
private final JobInstanceProgressCalculator myJobInstanceProgressCalculator;
|
||||
private final WorkChunkProcessor myJobExecutorSvc;
|
||||
private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
|
||||
private final IReductionStepExecutorService myReductionStepExecutorService;
|
||||
private final String myInstanceId;
|
||||
private final JobDefinitionRegistry myJobDefinitionegistry;
|
||||
|
||||
JobInstanceProcessor(IJobPersistence theJobPersistence,
|
||||
BatchJobSender theBatchJobSender,
|
||||
JobInstance theInstance,
|
||||
String theInstanceId,
|
||||
JobChunkProgressAccumulator theProgressAccumulator,
|
||||
WorkChunkProcessor theExecutorSvc
|
||||
) {
|
||||
IReductionStepExecutorService theReductionStepExecutorService,
|
||||
JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myBatchJobSender = theBatchJobSender;
|
||||
myInstance = theInstance;
|
||||
myJobExecutorSvc = theExecutorSvc;
|
||||
myInstanceId = theInstanceId;
|
||||
myProgressAccumulator = theProgressAccumulator;
|
||||
myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(theJobPersistence, theInstance, theProgressAccumulator);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
|
||||
myReductionStepExecutorService = theReductionStepExecutorService;
|
||||
myJobDefinitionegistry = theJobDefinitionRegistry;
|
||||
myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(theJobPersistence, theProgressAccumulator, theJobDefinitionRegistry);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
public void process() {
|
||||
handleCancellation();
|
||||
cleanupInstance();
|
||||
triggerGatedExecutions();
|
||||
JobInstance theInstance = myJobPersistence.fetchInstance(myInstanceId).orElse(null);
|
||||
if (theInstance == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
handleCancellation(theInstance);
|
||||
cleanupInstance(theInstance);
|
||||
triggerGatedExecutions(theInstance);
|
||||
}
|
||||
|
||||
private void handleCancellation() {
|
||||
if (myInstance.isPendingCancellationRequest()) {
|
||||
myInstance.setErrorMessage(buildCancelledMessage());
|
||||
myJobInstanceStatusUpdater.setCancelled(myInstance);
|
||||
private void handleCancellation(JobInstance theInstance) {
|
||||
if (theInstance.isPendingCancellationRequest()) {
|
||||
theInstance.setErrorMessage(buildCancelledMessage(theInstance));
|
||||
myJobInstanceStatusUpdater.setCancelled(theInstance);
|
||||
}
|
||||
}
|
||||
|
||||
private String buildCancelledMessage() {
|
||||
private String buildCancelledMessage(JobInstance theInstance) {
|
||||
String msg = "Job instance cancelled";
|
||||
if (myInstance.hasGatedStep()) {
|
||||
msg += " while running step " + myInstance.getCurrentGatedStepId();
|
||||
if (theInstance.hasGatedStep()) {
|
||||
msg += " while running step " + theInstance.getCurrentGatedStepId();
|
||||
}
|
||||
return msg;
|
||||
}
|
||||
|
||||
private void cleanupInstance() {
|
||||
switch (myInstance.getStatus()) {
|
||||
private void cleanupInstance(JobInstance theInstance) {
|
||||
switch (theInstance.getStatus()) {
|
||||
case QUEUED:
|
||||
// If we're still QUEUED, there are no stats to calculate
|
||||
break;
|
||||
case FINALIZE:
|
||||
// If we're in FINALIZE, the reduction step is working so we should stay out of the way until it
|
||||
// marks the job as COMPLETED
|
||||
return;
|
||||
case IN_PROGRESS:
|
||||
case ERRORED:
|
||||
case FINALIZE:
|
||||
myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress();
|
||||
myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress(theInstance);
|
||||
break;
|
||||
case COMPLETED:
|
||||
case FAILED:
|
||||
case CANCELLED:
|
||||
if (purgeExpiredInstance()) {
|
||||
if (purgeExpiredInstance(theInstance)) {
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case CANCELLED:
|
||||
purgeExpiredInstance(theInstance);
|
||||
return;
|
||||
}
|
||||
|
||||
if (myInstance.isFinished() && !myInstance.isWorkChunksPurged()) {
|
||||
myInstance.setWorkChunksPurged(true);
|
||||
myJobPersistence.deleteChunks(myInstance.getInstanceId());
|
||||
myJobPersistence.updateInstance(myInstance);
|
||||
if (theInstance.isFinished() && !theInstance.isWorkChunksPurged()) {
|
||||
myJobInstanceProgressCalculator.calculateInstanceProgressAndPopulateInstance(theInstance);
|
||||
|
||||
theInstance.setWorkChunksPurged(true);
|
||||
myJobPersistence.deleteChunksAndMarkInstanceAsChunksPurged(theInstance.getInstanceId());
|
||||
myJobPersistence.updateInstance(theInstance);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean purgeExpiredInstance() {
|
||||
if (myInstance.getEndTime() != null) {
|
||||
private boolean purgeExpiredInstance(JobInstance theInstance) {
|
||||
if (theInstance.getEndTime() != null) {
|
||||
long cutoff = System.currentTimeMillis() - PURGE_THRESHOLD;
|
||||
if (myInstance.getEndTime().getTime() < cutoff) {
|
||||
ourLog.info("Deleting old job instance {}", myInstance.getInstanceId());
|
||||
myJobPersistence.deleteInstanceAndChunks(myInstance.getInstanceId());
|
||||
if (theInstance.getEndTime().getTime() < cutoff) {
|
||||
ourLog.info("Deleting old job instance {}", theInstance.getInstanceId());
|
||||
myJobPersistence.deleteInstanceAndChunks(theInstance.getInstanceId());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private void triggerGatedExecutions() {
|
||||
if (!myInstance.isRunning()) {
|
||||
private void triggerGatedExecutions(JobInstance theInstance) {
|
||||
if (!theInstance.isRunning()) {
|
||||
ourLog.debug("JobInstance {} is not in a \"running\" state. Status {}",
|
||||
myInstance.getInstanceId(), myInstance.getStatus().name());
|
||||
theInstance.getInstanceId(), theInstance.getStatus().name());
|
||||
return;
|
||||
}
|
||||
|
||||
if (!myInstance.hasGatedStep()) {
|
||||
if (!theInstance.hasGatedStep()) {
|
||||
return;
|
||||
}
|
||||
|
||||
JobWorkCursor<?, ?, ?> jobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(myInstance.getJobDefinition(), myInstance.getCurrentGatedStepId());
|
||||
JobDefinition<? extends IModelJson> jobDefinition = myJobDefinitionegistry.getJobDefinitionOrThrowException(theInstance);
|
||||
JobWorkCursor<?, ?, ?> jobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(jobDefinition, theInstance.getCurrentGatedStepId());
|
||||
|
||||
// final step
|
||||
if (jobWorkCursor.isFinalStep() && !jobWorkCursor.isReductionStep()) {
|
||||
ourLog.debug("Job instance {} is in final step and it's not a reducer step", myInstance.getInstanceId());
|
||||
ourLog.debug("Job instance {} is in final step and it's not a reducer step", theInstance.getInstanceId());
|
||||
return;
|
||||
}
|
||||
|
||||
// we should not be sending a second reduction step
|
||||
// to the queue if it's in finalize status
|
||||
if (jobWorkCursor.isReductionStep() && myInstance.getStatus() == StatusEnum.FINALIZE) {
|
||||
ourLog.warn("Job instance {} is still finalizing - a second reduction job will not be started.", myInstance.getInstanceId());
|
||||
return;
|
||||
}
|
||||
|
||||
String instanceId = myInstance.getInstanceId();
|
||||
String instanceId = theInstance.getInstanceId();
|
||||
String currentStepId = jobWorkCursor.getCurrentStepId();
|
||||
boolean shouldAdvance = myJobPersistence.canAdvanceInstanceToNextStep(instanceId, currentStepId);
|
||||
if (shouldAdvance) {
|
||||
|
@ -156,10 +166,11 @@ public class JobInstanceProcessor {
|
|||
ourLog.info("All processing is complete for gated execution of instance {} step {}. Proceeding to step {}", instanceId, currentStepId, nextStepId);
|
||||
|
||||
if (jobWorkCursor.nextStep.isReductionStep()) {
|
||||
processReductionStep(jobWorkCursor);
|
||||
JobWorkCursor<?, ?, ?> nextJobWorkCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(jobDefinition, jobWorkCursor.nextStep.getStepId());
|
||||
myReductionStepExecutorService.triggerReductionStep(instanceId, nextJobWorkCursor);
|
||||
} else {
|
||||
// otherwise, continue processing as expected
|
||||
processChunksForNextSteps(instanceId, nextStepId);
|
||||
processChunksForNextSteps(theInstance, nextStepId);
|
||||
}
|
||||
} else {
|
||||
ourLog.debug("Not ready to advance gated execution of instance {} from step {} to {}.",
|
||||
|
@ -167,7 +178,8 @@ public class JobInstanceProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
private void processChunksForNextSteps(String instanceId, String nextStepId) {
|
||||
private void processChunksForNextSteps(JobInstance theInstance, String nextStepId) {
|
||||
String instanceId = theInstance.getInstanceId();
|
||||
List<String> queuedChunksForNextStep = myProgressAccumulator.getChunkIdsWithStatus(instanceId, nextStepId, StatusEnum.QUEUED);
|
||||
int totalChunksForNextStep = myProgressAccumulator.getTotalChunkCountForInstanceAndStep(instanceId, nextStepId);
|
||||
if (totalChunksForNextStep != queuedChunksForNextStep.size()) {
|
||||
|
@ -175,21 +187,12 @@ public class JobInstanceProcessor {
|
|||
}
|
||||
List<String> chunksToSubmit = myJobPersistence.fetchallchunkidsforstepWithStatus(instanceId, nextStepId, StatusEnum.QUEUED);
|
||||
for (String nextChunkId : chunksToSubmit) {
|
||||
JobWorkNotification workNotification = new JobWorkNotification(myInstance, nextStepId, nextChunkId);
|
||||
JobWorkNotification workNotification = new JobWorkNotification(theInstance, nextStepId, nextChunkId);
|
||||
myBatchJobSender.sendWorkChannelMessage(workNotification);
|
||||
}
|
||||
ourLog.debug("Submitted a batch of chunks for processing. [chunkCount={}, instanceId={}, stepId={}]", chunksToSubmit.size(), instanceId, nextStepId);
|
||||
myInstance.setCurrentGatedStepId(nextStepId);
|
||||
myJobPersistence.updateInstance(myInstance);
|
||||
theInstance.setCurrentGatedStepId(nextStepId);
|
||||
myJobPersistence.updateInstance(theInstance);
|
||||
}
|
||||
|
||||
private void processReductionStep(JobWorkCursor<?, ?, ?> theWorkCursor) {
|
||||
JobWorkNotification workNotification = new JobWorkNotification(
|
||||
myInstance,
|
||||
theWorkCursor.nextStep.getStepId(),
|
||||
Batch2Constants.REDUCTION_STEP_CHUNK_ID_PLACEHOLDER // chunk id; we don't need it
|
||||
);
|
||||
ourLog.debug("Submitting a Work Notification for a job reduction step. No associated work chunk ids are available.");
|
||||
myBatchJobSender.sendWorkChannelMessage(workNotification);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ package ca.uhn.fhir.batch2.maintenance;
|
|||
|
||||
import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
|
||||
import ca.uhn.fhir.batch2.channel.BatchJobSender;
|
||||
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
|
||||
import ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor;
|
||||
|
@ -95,6 +96,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
|
|||
private long myScheduledJobFrequencyMillis = DateUtils.MILLIS_PER_MINUTE;
|
||||
private Runnable myMaintenanceJobStartedCallback = () -> {};
|
||||
private Runnable myMaintenanceJobFinishedCallback = () -> {};
|
||||
private final IReductionStepExecutorService myReductionStepExecutorService;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
|
@ -104,9 +106,10 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
|
|||
DaoConfig theDaoConfig,
|
||||
@Nonnull JobDefinitionRegistry theJobDefinitionRegistry,
|
||||
@Nonnull BatchJobSender theBatchJobSender,
|
||||
@Nonnull WorkChunkProcessor theExecutor
|
||||
) {
|
||||
@Nonnull WorkChunkProcessor theExecutor,
|
||||
@Nonnull IReductionStepExecutorService theReductionStepExecutorService) {
|
||||
myDaoConfig = theDaoConfig;
|
||||
myReductionStepExecutorService = theReductionStepExecutorService;
|
||||
Validate.notNull(theSchedulerService);
|
||||
Validate.notNull(theJobPersistence);
|
||||
Validate.notNull(theJobDefinitionRegistry);
|
||||
|
@ -162,9 +165,10 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
|
|||
try {
|
||||
ourLog.debug("There is no clustered scheduling service. Requesting semaphore to run maintenance pass directly.");
|
||||
// Some unit tests, esp. the Loinc terminology tests, depend on this maintenance pass being run shortly after it is requested
|
||||
myRunMaintenanceSemaphore.tryAcquire(MAINTENANCE_TRIGGER_RUN_WITHOUT_SCHEDULER_TIMEOUT, TimeUnit.MINUTES);
|
||||
ourLog.debug("Semaphore acquired. Starting maintenance pass.");
|
||||
doMaintenancePass();
|
||||
if (myRunMaintenanceSemaphore.tryAcquire(MAINTENANCE_TRIGGER_RUN_WITHOUT_SCHEDULER_TIMEOUT, TimeUnit.MINUTES)) {
|
||||
ourLog.debug("Semaphore acquired. Starting maintenance pass.");
|
||||
doMaintenancePass();
|
||||
}
|
||||
return true;
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(Msg.code(2134) + "Timed out waiting to run a maintenance pass", e);
|
||||
|
@ -179,6 +183,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
|
|||
return myRunMaintenanceSemaphore.getQueueLength();
|
||||
}
|
||||
|
||||
@Override
|
||||
@VisibleForTesting
|
||||
public void forceMaintenancePass() {
|
||||
// to simulate a long running job!
|
||||
|
@ -210,11 +215,12 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
|
|||
List<JobInstance> instances = myJobPersistence.fetchInstances(INSTANCES_PER_PASS, page);
|
||||
|
||||
for (JobInstance instance : instances) {
|
||||
if (processedInstanceIds.add(instance.getInstanceId())) {
|
||||
String instanceId = instance.getInstanceId();
|
||||
if (processedInstanceIds.add(instanceId)) {
|
||||
myJobDefinitionRegistry.setJobDefinition(instance);
|
||||
JobInstanceProcessor jobInstanceProcessor = new JobInstanceProcessor(myJobPersistence,
|
||||
myBatchJobSender, instance, progressAccumulator, myJobExecutorSvc);
|
||||
ourLog.debug("Triggering maintenance process for instance {} in status {}", instance.getInstanceId(), instance.getStatus().name());
|
||||
myBatchJobSender, instanceId, progressAccumulator, myReductionStepExecutorService, myJobDefinitionRegistry);
|
||||
ourLog.debug("Triggering maintenance process for instance {} in status {}", instanceId, instance.getStatus().name());
|
||||
jobInstanceProcessor.process();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,8 +23,7 @@ package ca.uhn.fhir.batch2.model;
|
|||
public class ChunkOutcome {
|
||||
public enum Status {
|
||||
SUCCESS,
|
||||
FAIL,
|
||||
ABORT;
|
||||
FAILED;
|
||||
}
|
||||
|
||||
private final Status myStatus;
|
||||
|
|
|
@ -104,9 +104,6 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
|
|||
@JsonProperty(value = "report", access = JsonProperty.Access.READ_WRITE)
|
||||
private String myReport;
|
||||
|
||||
@JsonIgnore
|
||||
private JobDefinition<?> myJobDefinition;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*/
|
||||
|
@ -138,7 +135,6 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
|
|||
setWorkChunksPurged(theJobInstance.isWorkChunksPurged());
|
||||
setCurrentGatedStepId(theJobInstance.getCurrentGatedStepId());
|
||||
setReport(theJobInstance.getReport());
|
||||
myJobDefinition = theJobInstance.getJobDefinition();
|
||||
}
|
||||
|
||||
public void setUpdateTime(Date theUpdateTime) {
|
||||
|
@ -308,16 +304,10 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
|
|||
|
||||
|
||||
public void setJobDefinition(JobDefinition<?> theJobDefinition) {
|
||||
myJobDefinition = theJobDefinition;
|
||||
setJobDefinitionId(theJobDefinition.getJobDefinitionId());
|
||||
setJobDefinitionVersion(theJobDefinition.getJobDefinitionVersion());
|
||||
}
|
||||
|
||||
@Override
|
||||
public JobDefinition<?> getJobDefinition() {
|
||||
return myJobDefinition;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCancelled() {
|
||||
return myCancelled;
|
||||
|
@ -354,7 +344,7 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
|
|||
.append("errorMessage", myErrorMessage)
|
||||
.append("errorCount", myErrorCount)
|
||||
.append("estimatedTimeRemaining", myEstimatedTimeRemaining)
|
||||
.append("record", myReport)
|
||||
.append("report", myReport)
|
||||
.toString();
|
||||
}
|
||||
|
||||
|
@ -373,6 +363,11 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
|
|||
case IN_PROGRESS:
|
||||
case FINALIZE:
|
||||
return true;
|
||||
case COMPLETED:
|
||||
case ERRORED:
|
||||
case QUEUED:
|
||||
case FAILED:
|
||||
case CANCELLED:
|
||||
default:
|
||||
Logs.getBatchTroubleshootingLog().debug("Status {} is considered \"not running\"", getStatus().name());
|
||||
}
|
||||
|
|
|
@ -189,6 +189,10 @@ public enum StatusEnum {
|
|||
return myIncomplete;
|
||||
}
|
||||
|
||||
public boolean isEnded() {
|
||||
return myEnded;
|
||||
}
|
||||
|
||||
public boolean isCancellable() {
|
||||
return myIsCancellable;
|
||||
}
|
||||
|
|
|
@ -28,9 +28,12 @@ import com.fasterxml.jackson.annotation.JsonProperty;
|
|||
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
|
||||
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
|
||||
import org.apache.commons.lang3.Validate;
|
||||
import org.apache.commons.lang3.builder.ToStringBuilder;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import static org.apache.commons.lang3.StringUtils.isNotBlank;
|
||||
|
||||
public class WorkChunk implements IModelJson {
|
||||
|
||||
@JsonProperty("id")
|
||||
|
@ -229,11 +232,36 @@ public class WorkChunk implements IModelJson {
|
|||
return this;
|
||||
}
|
||||
|
||||
public Date getUpdateTime() {
|
||||
return myUpdateTime;
|
||||
}
|
||||
|
||||
public void setUpdateTime(Date theUpdateTime) {
|
||||
myUpdateTime = theUpdateTime;
|
||||
}
|
||||
|
||||
public Date getUpdateTime() {
|
||||
return myUpdateTime;
|
||||
@Override
|
||||
public String toString() {
|
||||
ToStringBuilder b = new ToStringBuilder(this);
|
||||
b.append("Id", myId);
|
||||
b.append("Sequence", mySequence);
|
||||
b.append("Status", myStatus);
|
||||
b.append("JobDefinitionId", myJobDefinitionId);
|
||||
b.append("JobDefinitionVersion", myJobDefinitionVersion);
|
||||
b.append("TargetStepId", myTargetStepId);
|
||||
b.append("InstanceId", myInstanceId);
|
||||
b.append("Data", isNotBlank(myData) ? "(present)" : "(absent)");
|
||||
b.append("CreateTime", myCreateTime);
|
||||
b.append("StartTime", myStartTime);
|
||||
b.append("EndTime", myEndTime);
|
||||
b.append("UpdateTime", myUpdateTime);
|
||||
b.append("RecordsProcessed", myRecordsProcessed);
|
||||
if (isNotBlank(myErrorMessage)) {
|
||||
b.append("ErrorMessage", myErrorMessage);
|
||||
}
|
||||
if (myErrorCount > 0) {
|
||||
b.append("ErrorCount", myErrorCount);
|
||||
}
|
||||
return b.toString();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package ca.uhn.fhir.batch2.progress;
|
|||
*/
|
||||
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
|
||||
import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
|
@ -28,57 +29,70 @@ import ca.uhn.fhir.batch2.model.WorkChunk;
|
|||
import ca.uhn.fhir.util.Logs;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.util.Iterator;
|
||||
|
||||
public class JobInstanceProgressCalculator {
|
||||
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
|
||||
private final IJobPersistence myJobPersistence;
|
||||
private final JobInstance myInstance;
|
||||
private final JobChunkProgressAccumulator myProgressAccumulator;
|
||||
private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
|
||||
|
||||
public JobInstanceProgressCalculator(IJobPersistence theJobPersistence, JobInstance theInstance, JobChunkProgressAccumulator theProgressAccumulator) {
|
||||
public JobInstanceProgressCalculator(IJobPersistence theJobPersistence, JobChunkProgressAccumulator theProgressAccumulator, JobDefinitionRegistry theJobDefinitionRegistry) {
|
||||
myJobPersistence = theJobPersistence;
|
||||
myInstance = theInstance;
|
||||
myProgressAccumulator = theProgressAccumulator;
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence);
|
||||
myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
public void calculateAndStoreInstanceProgress() {
|
||||
InstanceProgress instanceProgress = new InstanceProgress();
|
||||
public void calculateAndStoreInstanceProgress(JobInstance theInstance) {
|
||||
String instanceId = theInstance.getInstanceId();
|
||||
|
||||
Iterator<WorkChunk> workChunkIterator = myJobPersistence.fetchAllWorkChunksIterator(myInstance.getInstanceId(), false);
|
||||
InstanceProgress instanceProgress = calculateInstanceProgress(instanceId);
|
||||
|
||||
if (instanceProgress.failed()) {
|
||||
myJobInstanceStatusUpdater.setFailed(theInstance);
|
||||
}
|
||||
|
||||
JobInstance currentInstance = myJobPersistence.fetchInstance(instanceId).orElse(null);
|
||||
if (currentInstance != null) {
|
||||
instanceProgress.updateInstance(currentInstance);
|
||||
|
||||
if (instanceProgress.changed() || currentInstance.getStatus() == StatusEnum.IN_PROGRESS) {
|
||||
if (currentInstance.getCombinedRecordsProcessed() > 0) {
|
||||
ourLog.info("Job {} of type {} has status {} - {} records processed ({}/sec) - ETA: {}", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed(), currentInstance.getCombinedRecordsProcessedPerSecond(), currentInstance.getEstimatedTimeRemaining());
|
||||
ourLog.debug(instanceProgress.toString());
|
||||
} else {
|
||||
ourLog.info("Job {} of type {} has status {} - {} records processed", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed());
|
||||
ourLog.debug(instanceProgress.toString());
|
||||
}
|
||||
}
|
||||
|
||||
if (instanceProgress.changed()) {
|
||||
if (instanceProgress.hasNewStatus()) {
|
||||
myJobInstanceStatusUpdater.updateInstanceStatus(currentInstance, instanceProgress.getNewStatus());
|
||||
} else {
|
||||
myJobPersistence.updateInstance(currentInstance);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@Nonnull
|
||||
private InstanceProgress calculateInstanceProgress(String instanceId) {
|
||||
InstanceProgress instanceProgress = new InstanceProgress();
|
||||
Iterator<WorkChunk> workChunkIterator = myJobPersistence.fetchAllWorkChunksIterator(instanceId, false);
|
||||
|
||||
while (workChunkIterator.hasNext()) {
|
||||
WorkChunk next = workChunkIterator.next();
|
||||
myProgressAccumulator.addChunk(next);
|
||||
instanceProgress.addChunk(next);
|
||||
}
|
||||
return instanceProgress;
|
||||
}
|
||||
|
||||
instanceProgress.updateInstance(myInstance);
|
||||
|
||||
if (instanceProgress.failed()) {
|
||||
myJobInstanceStatusUpdater.setFailed(myInstance);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if (instanceProgress.changed() || myInstance.getStatus() == StatusEnum.IN_PROGRESS) {
|
||||
if (myInstance.getCombinedRecordsProcessed() > 0) {
|
||||
ourLog.info("Job {} of type {} has status {} - {} records processed ({}/sec) - ETA: {}", myInstance.getInstanceId(), myInstance.getJobDefinitionId(), myInstance.getStatus(), myInstance.getCombinedRecordsProcessed(), myInstance.getCombinedRecordsProcessedPerSecond(), myInstance.getEstimatedTimeRemaining());
|
||||
ourLog.debug(instanceProgress.toString());
|
||||
} else {
|
||||
ourLog.info("Job {} of type {} has status {} - {} records processed", myInstance.getInstanceId(), myInstance.getJobDefinitionId(), myInstance.getStatus(), myInstance.getCombinedRecordsProcessed());
|
||||
ourLog.debug(instanceProgress.toString());
|
||||
}
|
||||
}
|
||||
|
||||
if (instanceProgress.changed()) {
|
||||
if (instanceProgress.hasNewStatus()) {
|
||||
myJobInstanceStatusUpdater.updateInstanceStatus(myInstance, instanceProgress.getNewStatus());
|
||||
} else {
|
||||
myJobPersistence.updateInstance(myInstance);
|
||||
}
|
||||
}
|
||||
public void calculateInstanceProgressAndPopulateInstance(JobInstance theInstance) {
|
||||
InstanceProgress progress = calculateInstanceProgress(theInstance.getInstanceId());
|
||||
progress.updateInstance(theInstance);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ package ca.uhn.fhir.batch2.progress;
import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;

@ -35,9 +36,11 @@ import java.util.Optional;
public class JobInstanceStatusUpdater {
private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
private final IJobPersistence myJobPersistence;
private final JobDefinitionRegistry myJobDefinitionRegistry;

public JobInstanceStatusUpdater(IJobPersistence theJobPersistence) {
public JobInstanceStatusUpdater(IJobPersistence theJobPersistence, JobDefinitionRegistry theJobDefinitionRegistry) {
myJobPersistence = theJobPersistence;
myJobDefinitionRegistry = theJobDefinitionRegistry;
}

public boolean updateInstanceStatus(JobInstance theJobInstance, StatusEnum theNewStatus) {

@ -76,13 +79,15 @@ public class JobInstanceStatusUpdater {
// the status change happened in.
if (statusChanged) {
ourLog.info("Changing job instance {} of type {} from {} to {}", theJobInstance.getInstanceId(), theJobInstance.getJobDefinitionId(), origStatus, theJobInstance.getStatus());
handleStatusChange(origStatus, theJobInstance);
handleStatusChange(theJobInstance);
}
return statusChanged;
}

private <PT extends IModelJson> void handleStatusChange(StatusEnum theOrigStatus, JobInstance theJobInstance) {
JobDefinition<PT> definition = (JobDefinition<PT>) theJobInstance.getJobDefinition();
private <PT extends IModelJson> void handleStatusChange(JobInstance theJobInstance) {
JobDefinition<PT> definition = myJobDefinitionRegistry.getJobDefinitionOrThrowException(theJobInstance);
assert definition != null;

switch (theJobInstance.getStatus()) {
case COMPLETED:
invokeCompletionHandler(theJobInstance, definition, definition.getCompletionHandler());

@ -94,6 +99,7 @@ public class JobInstanceStatusUpdater {
case ERRORED:
case QUEUED:
case IN_PROGRESS:
case FINALIZE:
default:
// do nothing
}

@ -30,10 +30,4 @@ public class Batch2Constants {
* date when performing operations that pull resources by time windows.
*/
public static final Date BATCH_START_DATE = new InstantType("2000-01-01T00:00:00Z").getValue();

/**
* This is a placeholder chunk ID for the reduction step to allow it to be
* used in the message handling.
*/
public static final String REDUCTION_STEP_CHUNK_ID_PLACEHOLDER = "REDUCTION";
}

@ -19,6 +19,8 @@ import ca.uhn.fhir.batch2.model.MarkWorkChunkAsErrorRequest;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
import ca.uhn.fhir.jpa.subscription.channel.impl.LinkedBlockingChannel;
import ca.uhn.fhir.model.api.IModelJson;

@ -36,7 +38,6 @@ import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.stubbing.Answer;
import org.springframework.messaging.MessageDeliveryException;
import org.springframework.transaction.PlatformTransactionManager;

import javax.annotation.Nonnull;
import java.util.Arrays;

@ -69,8 +70,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
private JobDefinitionRegistry myJobDefinitionRegistry;
@Mock
private IJobMaintenanceService myJobMaintenanceService;
@Mock
private PlatformTransactionManager myPlatformTransactionManager;
private IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();

@Captor
private ArgumentCaptor<StepExecutionDetails<TestJobParameters, VoidModel>> myStep1ExecutionDetailsCaptor;

@ -89,7 +89,8 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
public void beforeEach() {
// The code was refactored to keep the same functionality,
// but moved into this service (so it's a real service here!)
WorkChunkProcessor jobStepExecutorSvc = new WorkChunkProcessor(myJobInstancePersister, myBatchJobSender, myPlatformTransactionManager);
WorkChunkProcessor jobStepExecutorSvc = new WorkChunkProcessor(myJobInstancePersister, myBatchJobSender, myTransactionService);
JobStepExecutorFactory jobStepExecutorFactory = new JobStepExecutorFactory(myJobInstancePersister, myBatchJobSender, jobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
mySvc = new JobCoordinatorImpl(myBatchJobSender, myWorkChannelReceiver, myJobInstancePersister, myJobDefinitionRegistry, jobStepExecutorSvc, myJobMaintenanceService);
}

@ -138,7 +139,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());

verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(any(), eq(50));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), any(), eq(50));
verify(myJobInstancePersister, times(0)).fetchWorkChunksWithoutData(any(), anyInt(), anyInt());
verify(myBatchJobSender, times(2)).sendWorkChannelMessage(any());
}

@ -256,7 +257,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());

verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(any(), eq(50));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), any(), eq(50));
verify(myBatchJobSender, times(0)).sendWorkChannelMessage(any());
}

@ -283,7 +284,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());

verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}

@Test

@ -321,7 +322,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(CHUNK_ID, capturedParams.getChunkId());
assertEquals("This is an error message", capturedParams.getErrorMsg());

verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(0));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(0));

}

@ -354,7 +355,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PASSWORD_VALUE, params.getPassword());

verify(myJobInstancePersister, times(1)).incrementWorkChunkErrorCount(eq(CHUNK_ID), eq(2));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}

@Test

@ -380,7 +381,7 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
assertEquals(PARAM_2_VALUE, params.getParam2());
assertEquals(PASSWORD_VALUE, params.getPassword());

verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(CHUNK_ID), eq(50));
verify(myJobInstancePersister, times(1)).markWorkChunkAsCompletedAndClearData(eq(INSTANCE_ID), eq(CHUNK_ID), eq(50));
}

@SuppressWarnings("unchecked")

@ -21,11 +21,14 @@ import org.mockito.ArgumentCaptor;
|
|||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Optional;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.junit.jupiter.api.Assertions.fail;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyBoolean;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
@ -35,6 +38,8 @@ import static org.mockito.Mockito.when;
|
|||
public class ReductionStepDataSinkTest {
|
||||
|
||||
private static final String INSTANCE_ID = "instanceId";
|
||||
@Mock
|
||||
private JobDefinitionRegistry myJobDefinitionRegistry;
|
||||
|
||||
private static class TestJobParameters implements IModelJson { }
|
||||
|
||||
|
@ -75,9 +80,8 @@ public class ReductionStepDataSinkTest {
|
|||
myDataSink = new ReductionStepDataSink<>(
|
||||
INSTANCE_ID,
|
||||
myWorkCursor,
|
||||
myJobDefinition,
|
||||
myJobPersistence
|
||||
);
|
||||
myJobPersistence,
|
||||
myJobDefinitionRegistry);
|
||||
ourLogger = (Logger) Logs.getBatchTroubleshootingLog();
|
||||
ourLogger.addAppender(myListAppender);
|
||||
}
|
||||
|
@ -92,6 +96,7 @@ public class ReductionStepDataSinkTest {
|
|||
// when
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
|
||||
.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());
|
||||
|
||||
// test
|
||||
myDataSink.accept(chunkData);
|
||||
|
@ -117,6 +122,7 @@ public class ReductionStepDataSinkTest {
|
|||
// when
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
|
||||
.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());
|
||||
|
||||
// test
|
||||
myDataSink.accept(firstData);
|
||||
|
|
|
@ -0,0 +1,246 @@
|
|||
package ca.uhn.fhir.batch2.coordinator;
|
||||
|
||||
import ca.uhn.fhir.batch2.api.ChunkExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IJobStepWorker;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepWorker;
|
||||
import ca.uhn.fhir.batch2.api.RunOutcome;
|
||||
import ca.uhn.fhir.batch2.api.StepExecutionDetails;
|
||||
import ca.uhn.fhir.batch2.api.VoidModel;
|
||||
import ca.uhn.fhir.batch2.model.ChunkOutcome;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinition;
|
||||
import ca.uhn.fhir.batch2.model.JobDefinitionStep;
|
||||
import ca.uhn.fhir.batch2.model.JobInstance;
|
||||
import ca.uhn.fhir.batch2.model.JobWorkCursor;
|
||||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.INSTANCE_ID;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.JOB_DEFINITION_ID;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.REDUCTION_STEP_ID;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.StepInputData;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.StepOutputData;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.TestJobParameters;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.createWorkChunk;
|
||||
import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessorTest.getTestJobInstance;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyList;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
public class ReductionStepExecutorServiceImplTest {
|
||||
|
||||
private final IHapiTransactionService myTransactionService = new NonTransactionalHapiTransactionService();
|
||||
@Mock
|
||||
private IJobPersistence myJobPersistence;
|
||||
@Mock
|
||||
private IReductionStepWorker<TestJobParameters, StepInputData, StepOutputData> myReductionStepWorker;
|
||||
// @Mock
|
||||
// private JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> myPreviousStep;
|
||||
// @Mock
|
||||
// private JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> myCurrentStep;
|
||||
private ReductionStepExecutorServiceImpl mySvc;
|
||||
private JobDefinitionRegistry myJobDefinitionRegistry = new JobDefinitionRegistry();
|
||||
|
||||
@BeforeEach
|
||||
public void before() {
|
||||
mySvc = new ReductionStepExecutorServiceImpl(myJobPersistence, myTransactionService, myJobDefinitionRegistry);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionWithChunkFailed_marksAllFutureChunksAsFailedButPreviousAsSuccess() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
jobInstance.setStatus(StatusEnum.IN_PROGRESS);
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
|
||||
|
||||
// when
|
||||
when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData>) createJobDefinition().getSteps().get(1));
|
||||
when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myReductionStepWorker.consume(any(ChunkExecutionDetails.class)))
|
||||
.thenReturn(ChunkOutcome.SUCCESS())
|
||||
.thenReturn(new ChunkOutcome(ChunkOutcome.Status.FAILED));
|
||||
|
||||
// test
|
||||
ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
|
||||
|
||||
// verification
|
||||
assertFalse(result.isSuccessful());
|
||||
ArgumentCaptor<List> submittedListIds = ArgumentCaptor.forClass(List.class);
|
||||
ArgumentCaptor<StatusEnum> statusCaptor = ArgumentCaptor.forClass(StatusEnum.class);
|
||||
verify(myJobPersistence, times(chunkIds.size()))
|
||||
.markWorkChunksWithStatusAndWipeData(
|
||||
eq(INSTANCE_ID),
|
||||
submittedListIds.capture(),
|
||||
statusCaptor.capture(),
|
||||
any()
|
||||
);
|
||||
assertEquals(2, submittedListIds.getAllValues().size());
|
||||
List<String> list1 = submittedListIds.getAllValues().get(0);
|
||||
List<String> list2 = submittedListIds.getAllValues().get(1);
|
||||
assertTrue(list1.contains(chunkIds.get(0)));
|
||||
assertTrue(list2.contains(chunkIds.get(1)));
|
||||
|
||||
// assumes the order in which they are called:
// successes first, then failures
assertEquals(2, statusCaptor.getAllValues().size());
List<StatusEnum> statuses = statusCaptor.getAllValues();
assertEquals(StatusEnum.COMPLETED, statuses.get(0));
assertEquals(StatusEnum.FAILED, statuses.get(1));
}
|
||||
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionStepWithValidInput_executesAsExpected() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
jobInstance.setStatus(StatusEnum.IN_PROGRESS);
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
|
||||
|
||||
// when
|
||||
when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData>) createJobDefinition().getSteps().get(1));
|
||||
when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myReductionStepWorker.consume(any(ChunkExecutionDetails.class)))
|
||||
.thenReturn(ChunkOutcome.SUCCESS());
|
||||
when(myReductionStepWorker.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
|
||||
.thenReturn(RunOutcome.SUCCESS);
|
||||
|
||||
// test
|
||||
ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
|
||||
|
||||
// verify
|
||||
ArgumentCaptor<ChunkExecutionDetails> chunkCaptor = ArgumentCaptor.forClass(ChunkExecutionDetails.class);
|
||||
verify(myReductionStepWorker, times(chunks.size()))
|
||||
.consume(chunkCaptor.capture());
|
||||
List<ChunkExecutionDetails> chunksSubmitted = chunkCaptor.getAllValues();
|
||||
assertEquals(chunks.size(), chunksSubmitted.size());
|
||||
for (ChunkExecutionDetails submitted : chunksSubmitted) {
|
||||
assertTrue(chunkIds.contains(submitted.getChunkId()));
|
||||
}
|
||||
|
||||
assertTrue(result.isSuccessful());
|
||||
ArgumentCaptor<List<String>> chunkIdCaptor = ArgumentCaptor.forClass(List.class);
|
||||
verify(myJobPersistence).markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
|
||||
chunkIdCaptor.capture(), eq(StatusEnum.COMPLETED), eq(null));
|
||||
List<String> capturedIds = chunkIdCaptor.getValue();
|
||||
assertEquals(chunkIds.size(), capturedIds.size());
|
||||
for (String chunkId : chunkIds) {
|
||||
assertTrue(capturedIds.contains(chunkId));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionStepWithErrors_returnsFalseAndMarksPreviousChunksFailed() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
jobInstance.setStatus(StatusEnum.IN_PROGRESS);
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
|
||||
// when
|
||||
when(workCursor.getCurrentStep()).thenReturn((JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData>) createJobDefinition().getSteps().get(1));
|
||||
when(workCursor.getJobDefinition()).thenReturn(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(jobInstance));
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID))).thenReturn(chunks.stream());
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
doThrow(new RuntimeException("This is an error")).when(myReductionStepWorker).consume(any(ChunkExecutionDetails.class));
|
||||
|
||||
// test
|
||||
ReductionStepChunkProcessingResponse result = mySvc.executeReductionStep(INSTANCE_ID, workCursor);
|
||||
|
||||
// verify
|
||||
assertFalse(result.isSuccessful());
|
||||
ArgumentCaptor<String> chunkIdCaptor = ArgumentCaptor.forClass(String.class);
|
||||
ArgumentCaptor<String> errorCaptor = ArgumentCaptor.forClass(String.class);
|
||||
verify(myJobPersistence, times(chunkIds.size()))
|
||||
.markWorkChunkAsFailed(chunkIdCaptor.capture(), errorCaptor.capture());
|
||||
List<String> chunkIdsCaptured = chunkIdCaptor.getAllValues();
|
||||
List<String> errorsCaptured = errorCaptor.getAllValues();
|
||||
for (int i = 0; i < chunkIds.size(); i++) {
|
||||
String cId = chunkIdsCaptured.get(i);
|
||||
String error = errorsCaptured.get(i);
|
||||
|
||||
assertTrue(chunkIds.contains(cId));
|
||||
assertTrue(error.contains("Reduction step failed to execute chunk reduction for chunk"));
|
||||
}
|
||||
verify(myJobPersistence, never())
|
||||
.markWorkChunksWithStatusAndWipeData(anyString(), anyList(), any(), anyString());
|
||||
verify(myReductionStepWorker, never())
|
||||
.run(any(), any());
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private JobDefinition<TestJobParameters> createJobDefinition() {
|
||||
return JobDefinition.newBuilder()
|
||||
.setJobDefinitionId(JOB_DEFINITION_ID)
|
||||
.setJobDescription("Reduction job description")
|
||||
.setJobDefinitionVersion(1)
|
||||
.gatedExecution()
|
||||
.setParametersType(TestJobParameters.class)
|
||||
.addFirstStep(
|
||||
"step 1",
|
||||
"description 1",
|
||||
VoidModel.class,
|
||||
mock(IJobStepWorker.class) // we don't care about this step - we just need it
|
||||
)
|
||||
.addFinalReducerStep(
|
||||
REDUCTION_STEP_ID,
|
||||
"description 2",
|
||||
StepOutputData.class,
|
||||
myReductionStepWorker
|
||||
)
|
||||
.build();
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -23,6 +23,9 @@ import ca.uhn.fhir.batch2.model.MarkWorkChunkAsErrorRequest;
|
|||
import ca.uhn.fhir.batch2.model.StatusEnum;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunk;
|
||||
import ca.uhn.fhir.batch2.model.WorkChunkData;
|
||||
import ca.uhn.fhir.jpa.dao.tx.HapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
|
||||
import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
|
||||
import ca.uhn.fhir.model.api.IModelJson;
|
||||
import ca.uhn.fhir.util.JsonUtil;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
|
@ -60,8 +63,8 @@ import static org.mockito.Mockito.when;
|
|||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
public class WorkChunkProcessorTest {
|
||||
private static final String INSTANCE_ID = "instanceId";
|
||||
private static final String JOB_DEFINITION_ID = "jobDefId";
|
||||
static final String INSTANCE_ID = "instanceId";
|
||||
static final String JOB_DEFINITION_ID = "jobDefId";
|
||||
public static final String REDUCTION_STEP_ID = "step last";
|
||||
|
||||
// static internal use classes
|
||||
|
@ -72,11 +75,11 @@ public class WorkChunkProcessorTest {
|
|||
FINAL
|
||||
}
|
||||
|
||||
private static class TestJobParameters implements IModelJson { }
|
||||
static class TestJobParameters implements IModelJson { }
|
||||
|
||||
private static class StepInputData implements IModelJson { }
|
||||
static class StepInputData implements IModelJson { }
|
||||
|
||||
private static class StepOutputData implements IModelJson { }
|
||||
static class StepOutputData implements IModelJson { }
|
||||
|
||||
private static class TestDataSink<OT extends IModelJson> extends BaseDataSink<TestJobParameters, StepInputData, OT> {
|
||||
|
||||
|
@ -105,8 +108,8 @@ public class WorkChunkProcessorTest {
|
|||
// our test class
|
||||
private class TestWorkChunkProcessor extends WorkChunkProcessor {
|
||||
|
||||
public TestWorkChunkProcessor(IJobPersistence thePersistence, BatchJobSender theSender, PlatformTransactionManager theTransactionManager) {
|
||||
super(thePersistence, theSender, theTransactionManager);
|
||||
public TestWorkChunkProcessor(IJobPersistence thePersistence, BatchJobSender theSender, IHapiTransactionService theHapiTransactionService) {
|
||||
super(thePersistence, theSender, theHapiTransactionService);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -139,8 +142,7 @@ public class WorkChunkProcessorTest {
|
|||
@Mock
|
||||
private BatchJobSender myJobSender;
|
||||
|
||||
@Mock
|
||||
private PlatformTransactionManager myMockTransactionManager;
|
||||
private IHapiTransactionService myMockTransactionManager = new NonTransactionalHapiTransactionService();
|
||||
|
||||
private TestWorkChunkProcessor myExecutorSvc;
|
||||
|
||||
|
@ -186,218 +188,6 @@ public class WorkChunkProcessorTest {
|
|||
return step;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionStepWithValidInput_executesAsExpected() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> step = mockOutWorkCursor(StepType.REDUCTION, workCursor, true, false);
|
||||
|
||||
// when
|
||||
when(workCursor.isReductionStep())
|
||||
.thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
|
||||
.thenReturn(ChunkOutcome.SUCCESS());
|
||||
when(myReductionStep.run(
|
||||
any(StepExecutionDetails.class), any(IJobDataSink.class)
|
||||
)).thenReturn(RunOutcome.SUCCESS);
|
||||
|
||||
// test
|
||||
JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
|
||||
workCursor,
|
||||
jobInstance,
|
||||
null
|
||||
);
|
||||
|
||||
// verify
|
||||
ArgumentCaptor<ChunkExecutionDetails> chunkCaptor = ArgumentCaptor.forClass(ChunkExecutionDetails.class);
|
||||
verify(myReductionStep, times(chunks.size()))
|
||||
.consume(chunkCaptor.capture());
|
||||
List<ChunkExecutionDetails> chunksSubmitted = chunkCaptor.getAllValues();
|
||||
assertEquals(chunks.size(), chunksSubmitted.size());
|
||||
for (ChunkExecutionDetails submitted : chunksSubmitted) {
|
||||
assertTrue(chunkIds.contains(submitted.getChunkId()));
|
||||
}
|
||||
|
||||
assertTrue(result.isSuccessful());
|
||||
assertTrue(myDataSink.myActualDataSink instanceof ReductionStepDataSink);
|
||||
ArgumentCaptor<StepExecutionDetails> executionDetsCaptor = ArgumentCaptor.forClass(StepExecutionDetails.class);
|
||||
verify(myReductionStep).run(executionDetsCaptor.capture(), eq(myDataSink));
|
||||
assertTrue(executionDetsCaptor.getValue() instanceof ReductionStepExecutionDetails);
|
||||
ArgumentCaptor<List<String>> chunkIdCaptor = ArgumentCaptor.forClass(List.class);
|
||||
verify(myJobPersistence).markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
|
||||
chunkIdCaptor.capture(), eq(StatusEnum.COMPLETED), eq(null));
|
||||
List<String> capturedIds = chunkIdCaptor.getValue();
|
||||
assertEquals(chunkIds.size(), capturedIds.size());
|
||||
for (String chunkId : chunkIds) {
|
||||
assertTrue(capturedIds.contains(chunkId));
|
||||
}
|
||||
|
||||
// nevers
|
||||
verifyNoErrors(0);
|
||||
verify(myNonReductionStep, never()).run(any(), any());
|
||||
verify(myLastStep, never()).run(any(), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionStepWithErrors_returnsFalseAndMarksPreviousChunksFailed() {
|
||||
// setup
|
||||
String errorMsg = "Exceptional!";
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
|
||||
|
||||
// when
|
||||
when(workCursor.isReductionStep())
|
||||
.thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
doThrow(new RuntimeException(errorMsg))
|
||||
.when(myReductionStep).consume(any(ChunkExecutionDetails.class));
|
||||
|
||||
// test
|
||||
JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
|
||||
workCursor,
|
||||
jobInstance,
|
||||
null
|
||||
);
|
||||
|
||||
// verify
|
||||
assertFalse(result.isSuccessful());
|
||||
ArgumentCaptor<String> chunkIdCaptor = ArgumentCaptor.forClass(String.class);
|
||||
ArgumentCaptor<String> errorCaptor = ArgumentCaptor.forClass(String.class);
|
||||
verify(myJobPersistence, times(chunkIds.size()))
|
||||
.markWorkChunkAsFailed(chunkIdCaptor.capture(), errorCaptor.capture());
|
||||
List<String> chunkIdsCaptured = chunkIdCaptor.getAllValues();
|
||||
List<String> errorsCaptured = errorCaptor.getAllValues();
|
||||
for (int i = 0; i < chunkIds.size(); i++) {
|
||||
String cId = chunkIdsCaptured.get(i);
|
||||
String error = errorsCaptured.get(i);
|
||||
|
||||
assertTrue(chunkIds.contains(cId));
|
||||
assertTrue(error.contains("Reduction step failed to execute chunk reduction for chunk"));
|
||||
}
|
||||
verify(myJobPersistence, never())
|
||||
.markWorkChunksWithStatusAndWipeData(anyString(), anyList(), any(), anyString());
|
||||
verify(myReductionStep, never())
|
||||
.run(any(), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionStepWithChunkFailures_marksChunkAsFailedButExecutesRestAsSuccess() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
|
||||
|
||||
// when
|
||||
when(workCursor.isReductionStep())
|
||||
.thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
|
||||
.thenReturn(ChunkOutcome.SUCCESS())
|
||||
.thenReturn(new ChunkOutcome(ChunkOutcome.Status.FAIL));
|
||||
when(myReductionStep.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
|
||||
.thenReturn(RunOutcome.SUCCESS);
|
||||
|
||||
// test
|
||||
JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
|
||||
workCursor,
|
||||
jobInstance,
|
||||
null
|
||||
);
|
||||
|
||||
// verify
|
||||
assertFalse(result.isSuccessful());
|
||||
verify(myJobPersistence)
|
||||
.markWorkChunkAsFailed(eq(chunkIds.get(1)), anyString());
|
||||
ArgumentCaptor<List> chunkListCaptor = ArgumentCaptor.forClass(List.class);
|
||||
verify(myJobPersistence)
|
||||
.markWorkChunksWithStatusAndWipeData(eq(INSTANCE_ID),
|
||||
chunkListCaptor.capture(),
|
||||
eq(StatusEnum.COMPLETED),
|
||||
any());
|
||||
List<String> completedIds = chunkListCaptor.getValue();
|
||||
assertEquals(1, completedIds.size());
|
||||
assertEquals(chunkIds.get(0), completedIds.get(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void doExecution_reductionWithChunkAbort_marksAllFutureChunksAsFailedButPreviousAsSuccess() {
|
||||
// setup
|
||||
List<String> chunkIds = Arrays.asList("chunk1", "chunk2");
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
for (String id : chunkIds) {
|
||||
chunks.add(createWorkChunk(id));
|
||||
}
|
||||
JobInstance jobInstance = getTestJobInstance();
|
||||
JobWorkCursor<TestJobParameters, StepInputData, StepOutputData> workCursor = mock(JobWorkCursor.class);
|
||||
JobDefinitionStep<TestJobParameters, StepInputData, StepOutputData> step = mockOutWorkCursor(StepType.REDUCTION, workCursor, false, false);
|
||||
|
||||
// when
|
||||
when(workCursor.isReductionStep())
|
||||
.thenReturn(true);
|
||||
when(myJobPersistence.markInstanceAsStatus(eq(INSTANCE_ID), eq(StatusEnum.FINALIZE))).thenReturn(true);
|
||||
when(myJobPersistence.fetchAllWorkChunksForStepStream(eq(INSTANCE_ID), eq(REDUCTION_STEP_ID)))
|
||||
.thenReturn(chunks.stream());
|
||||
when(myReductionStep.consume(any(ChunkExecutionDetails.class)))
|
||||
.thenReturn(ChunkOutcome.SUCCESS())
|
||||
.thenReturn(new ChunkOutcome(ChunkOutcome.Status.ABORT));
|
||||
when(myReductionStep.run(any(StepExecutionDetails.class), any(BaseDataSink.class)))
|
||||
.thenReturn(RunOutcome.SUCCESS);
|
||||
|
||||
// test
|
||||
JobStepExecutorOutput<?, ?, ?> result = myExecutorSvc.doExecution(
|
||||
workCursor,
|
||||
jobInstance,
|
||||
null
|
||||
);
|
||||
|
||||
// verification
|
||||
assertFalse(result.isSuccessful());
|
||||
ArgumentCaptor<List> submittedListIds = ArgumentCaptor.forClass(List.class);
|
||||
ArgumentCaptor<StatusEnum> statusCaptor = ArgumentCaptor.forClass(StatusEnum.class);
|
||||
verify(myJobPersistence, times(chunkIds.size()))
|
||||
.markWorkChunksWithStatusAndWipeData(
|
||||
eq(INSTANCE_ID),
|
||||
submittedListIds.capture(),
|
||||
statusCaptor.capture(),
|
||||
any()
|
||||
);
|
||||
assertEquals(2, submittedListIds.getAllValues().size());
|
||||
List<String> list1 = submittedListIds.getAllValues().get(0);
|
||||
List<String> list2 = submittedListIds.getAllValues().get(1);
|
||||
assertTrue(list1.contains(chunkIds.get(0)));
|
||||
assertTrue(list2.contains(chunkIds.get(1)));
|
||||
|
||||
// assumes the order in which they are called:
// successes first, then failures
assertEquals(2, statusCaptor.getAllValues().size());
List<StatusEnum> statuses = statusCaptor.getAllValues();
assertEquals(StatusEnum.COMPLETED, statuses.get(0));
assertEquals(StatusEnum.FAILED, statuses.get(1));
}
|
||||
|
||||
@Test
|
||||
public void doExecution_nonReductionIntermediateStepWithValidInput_executesAsExpected() {
|
||||
|
@ -437,7 +227,7 @@ public class WorkChunkProcessorTest {
|
|||
// verify
|
||||
assertTrue(result.isSuccessful());
|
||||
verify(myJobPersistence)
|
||||
.markWorkChunkAsCompletedAndClearData(eq(chunk.getId()), anyInt());
|
||||
.markWorkChunkAsCompletedAndClearData(any(), eq(chunk.getId()), anyInt());
|
||||
assertTrue(myDataSink.myActualDataSink instanceof JobDataSink);
|
||||
|
||||
if (theRecoveredErrorsForDataSink > 0) {
|
||||
|
@ -616,14 +406,14 @@ public class WorkChunkProcessorTest {
|
|||
.fetchAllWorkChunksForStepStream(anyString(), anyString());
|
||||
}
|
||||
|
||||
private JobInstance getTestJobInstance() {
|
||||
static JobInstance getTestJobInstance() {
|
||||
JobInstance instance = JobInstance.fromInstanceId(INSTANCE_ID);
|
||||
instance.setParameters(new TestJobParameters());
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
private WorkChunk createWorkChunk(String theId) {
|
||||
static WorkChunk createWorkChunk(String theId) {
|
||||
WorkChunk chunk = new WorkChunk();
|
||||
chunk.setInstanceId(INSTANCE_ID);
|
||||
chunk.setId(theId);
|
||||
|
|
|
@ -2,6 +2,7 @@ package ca.uhn.fhir.batch2.maintenance;
|
|||
|
||||
import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
|
||||
import ca.uhn.fhir.batch2.api.IJobPersistence;
|
||||
import ca.uhn.fhir.batch2.api.IReductionStepExecutorService;
|
||||
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
|
||||
import ca.uhn.fhir.batch2.channel.BatchJobSender;
|
||||
import ca.uhn.fhir.batch2.coordinator.BaseBatch2Test;
|
||||
|
@ -82,8 +83,8 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
private ArgumentCaptor<Message<JobWorkNotification>> myMessageCaptor;
|
||||
@Captor
|
||||
private ArgumentCaptor<JobCompletionDetails<TestJobParameters>> myJobCompletionCaptor;
|
||||
|
||||
private final JobInstance ourQueuedInstance = new JobInstance().setStatus(StatusEnum.QUEUED);
|
||||
@Mock
|
||||
private IReductionStepExecutorService myReductionStepExecutorService;
|
||||
|
||||
@BeforeEach
|
||||
public void beforeEach() {
|
||||
|
@ -94,8 +95,8 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
myDaoConfig,
|
||||
myJobDefinitionRegistry,
|
||||
batchJobSender,
|
||||
myJobExecutorSvc
|
||||
);
|
||||
myJobExecutorSvc,
|
||||
myReductionStepExecutorService);
|
||||
myDaoConfig.setJobFastTrackingEnabled(true);
|
||||
}
|
||||
|
||||
|
@ -107,9 +108,6 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
verify(myJobPersistence, never()).updateInstance(any());
|
||||
|
@ -126,6 +124,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(StatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
|
@ -160,6 +159,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setErrorMessage("This is an error message");
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
|
@ -199,6 +199,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
|
||||
// Execute
|
||||
mySvc.runMaintenancePass();
|
||||
|
@ -221,6 +222,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
instance.setStatus(StatusEnum.FAILED);
|
||||
instance.setEndTime(parseTime("2001-01-01T12:12:12Z"));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
|
@ -253,11 +255,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
);
|
||||
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(t -> t.completionHandler(myCompletionHandler)));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
|
||||
.thenReturn(chunks.iterator());
|
||||
JobInstance instance1 = createInstance();
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
when(myJobPersistence.updateInstance(any())).thenReturn(true);
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(ourQueuedInstance));
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance1));
|
||||
|
||||
// Execute
|
||||
|
||||
|
@ -274,7 +276,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
assertEquals(0.25, instance.getCombinedRecordsProcessedPerSecond());
|
||||
assertEquals(parseTime("2022-02-12T14:10:00-04:00"), instance.getEndTime());
|
||||
|
||||
verify(myJobPersistence, times(1)).deleteChunks(eq(INSTANCE_ID));
|
||||
verify(myJobPersistence, times(1)).deleteChunksAndMarkInstanceAsChunksPurged(eq(INSTANCE_ID));
|
||||
verify(myCompletionHandler, times(1)).jobComplete(myJobCompletionCaptor.capture());
|
||||
|
||||
verifyNoMoreInteractions(myJobPersistence);
|
||||
|
@ -306,14 +308,14 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
);
|
||||
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
|
||||
.thenReturn(chunks.iterator());
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(ourQueuedInstance));
|
||||
.thenAnswer(t->chunks.iterator());
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
verify(myJobPersistence, times(2)).updateInstance(myInstanceCaptor.capture());
|
||||
verify(myJobPersistence, times(3)).updateInstance(myInstanceCaptor.capture());
|
||||
JobInstance instance = myInstanceCaptor.getAllValues().get(0);
|
||||
|
||||
assertEquals(0.8333333333333334, instance.getProgress());
|
||||
|
@ -323,7 +325,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
assertEquals(0.25, instance.getCombinedRecordsProcessedPerSecond());
|
||||
assertEquals(parseTime("2022-02-12T14:10:00-04:00"), instance.getEndTime());
|
||||
|
||||
verify(myJobPersistence, times(1)).deleteChunks(eq(INSTANCE_ID));
|
||||
verify(myJobPersistence, times(1)).deleteChunksAndMarkInstanceAsChunksPurged(eq(INSTANCE_ID));
|
||||
|
||||
verifyNoMoreInteractions(myJobPersistence);
|
||||
}
|
||||
|
@ -343,11 +345,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(StatusEnum.QUEUED).setId(CHUNK_ID_2)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
|
||||
.thenReturn(chunks.iterator());
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
|
@ -373,11 +375,11 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(StatusEnum.QUEUED).setId(CHUNK_ID_2)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
|
||||
.thenReturn(chunks.iterator());
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
mySvc.runMaintenancePass();
|
||||
|
|
|
@ -4,6 +4,7 @@ import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobInstance;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;

@ -21,6 +22,7 @@ import java.util.concurrent.atomic.AtomicReference;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

@ExtendWith(MockitoExtension.class)

@ -35,7 +37,8 @@ class JobInstanceStatusUpdaterTest {
IJobPersistence myJobPersistence;
@Mock
private JobDefinition<TestParameters> myJobDefinition;

@Mock
private JobDefinitionRegistry myJobDefinitionRegistry;
@InjectMocks
JobInstanceStatusUpdater mySvc;
private JobInstance myInstance;

@ -52,6 +55,8 @@ class JobInstanceStatusUpdaterTest {
myInstance.setParameters(myTestParameters);
myInstance.setErrorMessage(TEST_ERROR_MESSAGE);
myInstance.setErrorCount(TEST_ERROR_COUNT);

when(myJobDefinitionRegistry.getJobDefinitionOrThrowException(any())).thenReturn((JobDefinition) myJobDefinition);
}

@Test

@ -7,7 +7,7 @@
<parent>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
<version>6.4.2</version>
<version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
</parent>

@ -5,7 +5,7 @@
<parent>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
<version>6.4.2</version>
<version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>

@ -5,7 +5,7 @@
<parent>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-deployable-pom</artifactId>
<version>6.4.2</version>
<version>6.4.2-SNAPSHOT</version>
<relativePath>../hapi-deployable-pom/pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>