Batch stabilization (#4647)
* Use java event names for work chunk transitions.
* Cherry-pick d5ebd1f667 from rel_6_4: avoid fetching work-chunk data (#4622)
* Add end time to reduction step (#4640) (cherry picked from commit 37f5e59ffc)
* Cancel processing: provide an error message in cancelled jobs, and avoid transitions in final states.
* Apply tx boundary to starting job and first chunk.
* Apply tx boundary to work chunk processing.
* Apply tx boundary to chunk handler.
* Delete BatchWorkChunk.
* Introduce events for job create and chunk dequeue.
* Move instance cancellation to database.
* Tx boundary around stats collection and completion.
* Extend tx boundary to error, fail, and cancel.
* Move failure into status calc.
* ERROR is not an "ended" state.
* Revert generics cleanup to avoid noise.
* Avoid sending gated chunks twice.
* Make no-data path safer.
* Fix mock test for step advance.
* Delete unsafe updateInstance() call.
* Fix cancel boundary.
* Sort mongo chunks for stable paging.
* Document error handling.
* Changelog and notes; assorted cleanup.
* Update hapi-fhir-jpaserver-test-utilities/src/main/java/ca/uhn/fhir/jpa/test/Batch2JobHelper.java

Co-authored-by: Long Ma <long@smilecdr.com>
Co-authored-by: longma1 <32119004+longma1@users.noreply.github.com>
Co-authored-by: StevenXLi <stevenli_8118@hotmail.com>

This commit is contained in:
parent d6d2ff531f
commit 8813d9beda
@ -0,0 +1,4 @@
+---
+type: perf
+issue: 4622
+title: "The batch system now reads less data during the maintenance pass. This avoids slowdowns on large systems."

@ -0,0 +1,4 @@
+---
+type: fix
+issue: 4324
+title: "Fix issue where end time is missing for bulk exports on completion"
@ -0,0 +1,4 @@
+---
+type: fix
+issue: 4647
+title: "Batch job state transitions are now transactionally safe."
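Concretely, "transactionally safe" here means state changes are expressed as guarded, set-based updates rather than read-modify-write cycles: a transition only succeeds if the row is still in an allowed prior state. The repository changes later in this diff introduce `updateInstanceStatusIfIn` for exactly this. A minimal sketch of the calling pattern (the surrounding class and wiring are assumed, not part of this commit):

    import ca.uhn.fhir.batch2.model.StatusEnum;
    import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;

    import java.util.Set;

    class GuardedTransitionSketch {
        // Attempt QUEUED -> IN_PROGRESS. The UPDATE matches zero rows if another
        // node has already moved the instance out of QUEUED, so the transition
        // can never be applied twice.
        boolean tryStart(IBatch2JobInstanceRepository theRepo, String theInstanceId) {
            int changed = theRepo.updateInstanceStatusIfIn(
                    theInstanceId, StatusEnum.IN_PROGRESS, Set.of(StatusEnum.QUEUED));
            return changed > 0;
        }
    }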
@ -17,7 +17,7 @@
  * limitations under the License.
  * #L%
  */
-package ca.uhn.fhir.jpa.util;
+package ca.uhn.fhir.jpa.batch2;
 
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.WorkChunk;

@ -26,7 +26,7 @@ import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
 
 import javax.annotation.Nonnull;
 
-public class JobInstanceUtil {
+class JobInstanceUtil {
 
     private JobInstanceUtil() {}
 
@ -63,14 +63,44 @@ public class JobInstanceUtil {
         return retVal;
     }
 
+    /**
+     * Copies all JobInstance fields to a Batch2JobInstanceEntity
+     * @param theJobInstance the job
+     * @param theJobInstanceEntity the target entity
+     */
+    public static void fromInstanceToEntity(@Nonnull JobInstance theJobInstance, @Nonnull Batch2JobInstanceEntity theJobInstanceEntity) {
+        theJobInstanceEntity.setId(theJobInstance.getInstanceId());
+        theJobInstanceEntity.setDefinitionId(theJobInstance.getJobDefinitionId());
+        theJobInstanceEntity.setDefinitionVersion(theJobInstance.getJobDefinitionVersion());
+        theJobInstanceEntity.setStatus(theJobInstance.getStatus());
+        theJobInstanceEntity.setCancelled(theJobInstance.isCancelled());
+        theJobInstanceEntity.setFastTracking(theJobInstance.isFastTracking());
+        theJobInstanceEntity.setStartTime(theJobInstance.getStartTime());
+        theJobInstanceEntity.setCreateTime(theJobInstance.getCreateTime());
+        theJobInstanceEntity.setEndTime(theJobInstance.getEndTime());
+        theJobInstanceEntity.setUpdateTime(theJobInstance.getUpdateTime());
+        theJobInstanceEntity.setCombinedRecordsProcessed(theJobInstance.getCombinedRecordsProcessed());
+        theJobInstanceEntity.setCombinedRecordsProcessedPerSecond(theJobInstance.getCombinedRecordsProcessedPerSecond());
+        theJobInstanceEntity.setTotalElapsedMillis(theJobInstance.getTotalElapsedMillis());
+        theJobInstanceEntity.setWorkChunksPurged(theJobInstance.isWorkChunksPurged());
+        theJobInstanceEntity.setProgress(theJobInstance.getProgress());
+        theJobInstanceEntity.setErrorMessage(theJobInstance.getErrorMessage());
+        theJobInstanceEntity.setErrorCount(theJobInstance.getErrorCount());
+        theJobInstanceEntity.setEstimatedTimeRemaining(theJobInstance.getEstimatedTimeRemaining());
+        theJobInstanceEntity.setParams(theJobInstance.getParameters());
+        theJobInstanceEntity.setCurrentGatedStepId(theJobInstance.getCurrentGatedStepId());
+        theJobInstanceEntity.setReport(theJobInstance.getReport());
+        theJobInstanceEntity.setEstimatedTimeRemaining(theJobInstance.getEstimatedTimeRemaining());
+    }
+
     /**
      * Converts a Batch2WorkChunkEntity into a WorkChunk object
      *
      * @param theEntity - the entity to convert
-     * @param theIncludeData - whether or not to include the Data attached to the chunk
      * @return - the WorkChunk object
      */
     @Nonnull
-    public static WorkChunk fromEntityToWorkChunk(@Nonnull Batch2WorkChunkEntity theEntity, boolean theIncludeData) {
+    public static WorkChunk fromEntityToWorkChunk(@Nonnull Batch2WorkChunkEntity theEntity) {
         WorkChunk retVal = new WorkChunk();
         retVal.setId(theEntity.getId());
         retVal.setSequence(theEntity.getSequence());
@ -86,11 +116,8 @@ public class JobInstanceUtil {
         retVal.setErrorMessage(theEntity.getErrorMessage());
         retVal.setErrorCount(theEntity.getErrorCount());
         retVal.setRecordsProcessed(theEntity.getRecordsProcessed());
-        if (theIncludeData) {
-            if (theEntity.getSerializedData() != null) {
-                retVal.setData(theEntity.getSerializedData());
-            }
-        }
+        // note: data may be null here if the chunk was loaded via the no-data projection query
+        retVal.setData(theEntity.getSerializedData());
         return retVal;
     }
 }
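With the `theIncludeData` flag gone from the mapper, whether a chunk carries its serialized data is decided entirely by which repository query loaded the entity. A sketch of the two loading paths, using the two queries this diff works with (`theInstanceId` and the wrapper class are placeholders):

    import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
    import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
    import org.springframework.data.domain.PageRequest;

    import java.util.List;

    class ChunkLoadingSketch {
        void loadBothWays(IBatch2WorkChunkRepository theRepo, String theInstanceId) {
            // Data-bearing path: full entity rows, serialized data (the CLOB) included.
            List<Batch2WorkChunkEntity> withData =
                    theRepo.fetchChunks(PageRequest.of(0, 100), theInstanceId);
            // Metadata-only path: constructor projection; mySerializedData stays null,
            // so fromEntityToWorkChunk() simply maps a null data field.
            List<Batch2WorkChunkEntity> noData =
                    theRepo.fetchChunksNoData(PageRequest.of(0, 100), theInstanceId);
        }
    }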
@ -35,7 +35,6 @@ import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
 import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
 import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
 import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
-import ca.uhn.fhir.jpa.util.JobInstanceUtil;
 import ca.uhn.fhir.model.api.PagingIterator;
 import ca.uhn.fhir.util.Logs;
 import org.apache.commons.collections4.ListUtils;

@ -52,6 +51,7 @@ import org.springframework.transaction.support.TransactionSynchronizationManager
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 import javax.persistence.EntityManager;
+import javax.persistence.LockModeType;
 import javax.persistence.Query;
 import java.util.ArrayList;
 import java.util.Date;

@ -64,6 +64,7 @@ import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor.MAX_CHUNK_ERROR_COUNT;
 import static ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity.ERROR_MSG_MAX_LENGTH;
 import static org.apache.commons.lang3.StringUtils.isBlank;
@ -89,7 +90,6 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
     }
 
     @Override
-    @Transactional(propagation = Propagation.REQUIRED)
     public String onWorkChunkCreate(WorkChunkCreateEvent theBatchWorkChunk) {
         Batch2WorkChunkEntity entity = new Batch2WorkChunkEntity();
         entity.setId(UUID.randomUUID().toString());

@ -102,6 +102,8 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
         entity.setCreateTime(new Date());
         entity.setStartTime(new Date());
         entity.setStatus(WorkChunkStatusEnum.QUEUED);
+        ourLog.debug("Create work chunk {}/{}/{}", entity.getInstanceId(), entity.getId(), entity.getTargetStepId());
+        ourLog.trace("Create work chunk data {}/{}: {}", entity.getInstanceId(), entity.getId(), entity.getSerializedData());
         myWorkChunkRepository.save(entity);
         return entity.getId();
     }
@ -109,13 +111,15 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
     @Override
     @Transactional(propagation = Propagation.REQUIRED)
     public Optional<WorkChunk> onWorkChunkDequeue(String theChunkId) {
-        int rowsModified = myWorkChunkRepository.updateChunkStatusForStart(theChunkId, new Date(), WorkChunkStatusEnum.IN_PROGRESS, List.of(WorkChunkStatusEnum.QUEUED, WorkChunkStatusEnum.ERRORED, WorkChunkStatusEnum.IN_PROGRESS));
+        // NOTE: Ideally, IN_PROGRESS wouldn't be allowed here. On chunk failure, a retry probably shouldn't be allowed either. But how does re-run happen if k8s kills a processor mid-run?
+        List<WorkChunkStatusEnum> priorStates = List.of(WorkChunkStatusEnum.QUEUED, WorkChunkStatusEnum.ERRORED, WorkChunkStatusEnum.IN_PROGRESS);
+        int rowsModified = myWorkChunkRepository.updateChunkStatusForStart(theChunkId, new Date(), WorkChunkStatusEnum.IN_PROGRESS, priorStates);
         if (rowsModified == 0) {
             ourLog.info("Attempting to start chunk {} but it was already started.", theChunkId);
             return Optional.empty();
         } else {
             Optional<Batch2WorkChunkEntity> chunk = myWorkChunkRepository.findById(theChunkId);
-            return chunk.map(t -> toChunk(t, true));
+            return chunk.map(this::toChunk);
         }
     }
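The dequeue transition hinges on a conditional UPDATE that succeeds only while the chunk is still in one of the allowed prior states; the row is only read back after the guard has claimed it. The declaration of `updateChunkStatusForStart` is not part of this diff, so the following is an assumed sketch of its shape, inferred from the call site above, not the actual source:

    // Assumed declaration (in IBatch2WorkChunkRepository) -- a sketch only.
    @Modifying
    @Query("UPDATE Batch2WorkChunkEntity e SET e.myStatus = :status, e.myStartTime = :st " +
        "WHERE e.myId = :id AND e.myStatus IN ( :priorStates )")
    int updateChunkStatusForStart(@Param("id") String theChunkId,
            @Param("st") Date theStartTime,
            @Param("status") WorkChunkStatusEnum theNewStatus,
            @Param("priorStates") List<WorkChunkStatusEnum> thePriorStates);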
@ -229,8 +233,8 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
         return myJobInstanceRepository.findAll(pageRequest).stream().map(this::toInstance).collect(Collectors.toList());
     }
 
-    private WorkChunk toChunk(Batch2WorkChunkEntity theEntity, boolean theIncludeData) {
-        return JobInstanceUtil.fromEntityToWorkChunk(theEntity, theIncludeData);
+    private WorkChunk toChunk(Batch2WorkChunkEntity theEntity) {
+        return JobInstanceUtil.fromEntityToWorkChunk(theEntity);
     }
 
     private JobInstance toInstance(Batch2JobInstanceEntity theEntity) {
@ -252,7 +256,7 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
             "where myId = :chunkId and myErrorCount > :maxCount");
         query.setParameter("chunkId", chunkId);
         query.setParameter("failed", WorkChunkStatusEnum.FAILED);
-        query.setParameter("maxCount", theParameters.getMaxRetries());
+        query.setParameter("maxCount", MAX_CHUNK_ERROR_COUNT);
         int failChangeCount = query.executeUpdate();
 
         if (failChangeCount > 0) {
@ -289,7 +293,6 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
     }
 
     @Override
-    @Transactional
     public void markWorkChunksWithStatusAndWipeData(String theInstanceId, List<String> theChunkIds, WorkChunkStatusEnum theStatus, String theErrorMessage) {
         assert TransactionSynchronizationManager.isActualTransactionActive();
@ -305,15 +308,16 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
     @Transactional(propagation = Propagation.REQUIRES_NEW)
     public boolean canAdvanceInstanceToNextStep(String theInstanceId, String theCurrentStepId) {
         Optional<Batch2JobInstanceEntity> instance = myJobInstanceRepository.findById(theInstanceId);
-        if (!instance.isPresent()) {
+        if (instance.isEmpty()) {
             return false;
         }
         if (instance.get().getStatus().isEnded()) {
             return false;
         }
-        List<WorkChunkStatusEnum> statusesForStep = myWorkChunkRepository.getDistinctStatusesForStep(theInstanceId, theCurrentStepId);
+        Set<WorkChunkStatusEnum> statusesForStep = myWorkChunkRepository.getDistinctStatusesForStep(theInstanceId, theCurrentStepId);
 
+        ourLog.debug("Checking whether gated job can advance to next step. [instanceId={}, currentStepId={}, statusesForStep={}]", theInstanceId, theCurrentStepId, statusesForStep);
-        return statusesForStep.stream().noneMatch(WorkChunkStatusEnum::isIncomplete) && statusesForStep.stream().anyMatch(status -> status == WorkChunkStatusEnum.COMPLETED);
+        return statusesForStep.isEmpty() || statusesForStep.equals(Set.of(WorkChunkStatusEnum.COMPLETED));
     }
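The rewritten return statement reads directly as the gating rule: a gated step may advance only when the distinct status set for the current step is empty (the step produced no chunks) or exactly {COMPLETED}. A small self-contained illustration of that rule:

    import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;

    import java.util.Set;

    class GateRuleSketch {
        static boolean canAdvance(Set<WorkChunkStatusEnum> theStatusesForStep) {
            return theStatusesForStep.isEmpty()
                    || theStatusesForStep.equals(Set.of(WorkChunkStatusEnum.COMPLETED));
        }

        public static void main(String[] theArgs) {
            // A single straggler keeps the gate closed:
            assert !canAdvance(Set.of(WorkChunkStatusEnum.COMPLETED, WorkChunkStatusEnum.IN_PROGRESS));
            // All chunks complete, or no chunks at all, opens it:
            assert canAdvance(Set.of(WorkChunkStatusEnum.COMPLETED));
            assert canAdvance(Set.of());
        }
    }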
 
     /**
@ -331,9 +335,14 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
             .withSystemRequest()
             .withPropagation(Propagation.REQUIRES_NEW)
             .execute(() -> {
-                List<Batch2WorkChunkEntity> chunks = myWorkChunkRepository.fetchChunks(PageRequest.of(thePageIndex, thePageSize), theInstanceId);
+                List<Batch2WorkChunkEntity> chunks;
+                if (theIncludeData) {
+                    chunks = myWorkChunkRepository.fetchChunks(PageRequest.of(thePageIndex, thePageSize), theInstanceId);
+                } else {
+                    chunks = myWorkChunkRepository.fetchChunksNoData(PageRequest.of(thePageIndex, thePageSize), theInstanceId);
+                }
                 for (Batch2WorkChunkEntity chunk : chunks) {
-                    theConsumer.accept(toChunk(chunk, theIncludeData));
+                    theConsumer.accept(toChunk(chunk));
                 }
             });
     }
@ -361,45 +370,30 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
     }
 
     @Override
     @Transactional(propagation = Propagation.MANDATORY)
     public Stream<WorkChunk> fetchAllWorkChunksForStepStream(String theInstanceId, String theStepId) {
-        return myWorkChunkRepository.fetchChunksForStep(theInstanceId, theStepId).map(entity -> toChunk(entity, true));
+        return myWorkChunkRepository.fetchChunksForStep(theInstanceId, theStepId).map(this::toChunk);
     }
 
-    /**
-     * Update the stored instance
-     *
-     * @param theInstance The instance - Must contain an ID
-     * @return true if the status changed
-     */
     @Override
-    @Transactional(propagation = Propagation.REQUIRES_NEW)
-    public boolean updateInstance(JobInstance theInstance) {
-        // Separate updating the status so we have atomic information about whether the status is changing
-        int recordsChangedByStatusUpdate = myJobInstanceRepository.updateInstanceStatus(theInstance.getInstanceId(), theInstance.getStatus());
+    @Transactional
+    public boolean updateInstance(String theInstanceId, JobInstanceUpdateCallback theModifier) {
+        Batch2JobInstanceEntity instanceEntity = myEntityManager.find(Batch2JobInstanceEntity.class, theInstanceId, LockModeType.PESSIMISTIC_WRITE);
+        if (null == instanceEntity) {
+            ourLog.error("No instance found with Id {}", theInstanceId);
+            return false;
+        }
+        // convert to JobInstance for public api
+        JobInstance jobInstance = JobInstanceUtil.fromEntityToInstance(instanceEntity);
 
-        Optional<Batch2JobInstanceEntity> instanceOpt = myJobInstanceRepository.findById(theInstance.getInstanceId());
-        Batch2JobInstanceEntity instanceEntity = instanceOpt.orElseThrow(() -> new IllegalArgumentException("Unknown instance ID: " + theInstance.getInstanceId()));
+        // run the modification callback
+        boolean wasModified = theModifier.doUpdate(jobInstance);
 
-        instanceEntity.setStartTime(theInstance.getStartTime());
-        instanceEntity.setEndTime(theInstance.getEndTime());
-        instanceEntity.setStatus(theInstance.getStatus());
-        instanceEntity.setCancelled(theInstance.isCancelled());
-        instanceEntity.setFastTracking(theInstance.isFastTracking());
-        instanceEntity.setCombinedRecordsProcessed(theInstance.getCombinedRecordsProcessed());
-        instanceEntity.setCombinedRecordsProcessedPerSecond(theInstance.getCombinedRecordsProcessedPerSecond());
-        instanceEntity.setTotalElapsedMillis(theInstance.getTotalElapsedMillis());
-        instanceEntity.setWorkChunksPurged(theInstance.isWorkChunksPurged());
-        instanceEntity.setProgress(theInstance.getProgress());
-        instanceEntity.setErrorMessage(theInstance.getErrorMessage());
-        instanceEntity.setErrorCount(theInstance.getErrorCount());
-        instanceEntity.setEstimatedTimeRemaining(theInstance.getEstimatedTimeRemaining());
-        instanceEntity.setCurrentGatedStepId(theInstance.getCurrentGatedStepId());
-        instanceEntity.setReport(theInstance.getReport());
+        if (wasModified) {
+            // copy fields back for flush.
+            JobInstanceUtil.fromInstanceToEntity(jobInstance, instanceEntity);
+        }
 
         myJobInstanceRepository.save(instanceEntity);
 
-        return recordsChangedByStatusUpdate > 0;
+        return wasModified;
     }
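The callback form makes the read-modify-write cycle explicit and keeps the whole cycle under the row's pessimistic lock. A hypothetical caller might look like this (`myJobPersistence` and `instanceId` are placeholders, and the progress update is illustrative only, not taken from this commit):

    boolean changed = myJobPersistence.updateInstance(instanceId, instance -> {
        if (instance.getStatus().isEnded()) {
            return false; // final states are left untouched; nothing is flushed
        }
        instance.setProgress(0.5); // illustrative modification
        return true; // modified fields are copied back onto the locked entity and flushed
    });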
 
     @Override
@ -433,38 +427,32 @@ public class JpaJobPersistenceImpl implements IJobPersistence {
         return recordsChanged > 0;
     }
 
     @Override
     public boolean markInstanceAsStatusWhenStatusIn(String theInstance, StatusEnum theStatusEnum, Set<StatusEnum> thePriorStates) {
         int recordsChanged = myJobInstanceRepository.updateInstanceStatusIfIn(theInstance, theStatusEnum, thePriorStates);
         ourLog.debug("Update job {} to status {} if in status {}: {}", theInstance, theStatusEnum, thePriorStates, recordsChanged > 0);
         return recordsChanged > 0;
     }
 
     @Override
     @Transactional(propagation = Propagation.REQUIRES_NEW)
     public JobOperationResultJson cancelInstance(String theInstanceId) {
         int recordsChanged = myJobInstanceRepository.updateInstanceCancelled(theInstanceId, true);
         String operationString = "Cancel job instance " + theInstanceId;
 
+        // TODO MB this is much too detailed to be down here - this should be up at the api layer. Replace with simple enum.
+        String messagePrefix = "Job instance <" + theInstanceId + ">";
         if (recordsChanged > 0) {
-            return JobOperationResultJson.newSuccess(operationString, "Job instance <" + theInstanceId + "> successfully cancelled.");
+            return JobOperationResultJson.newSuccess(operationString, messagePrefix + " successfully cancelled.");
         } else {
             Optional<JobInstance> instance = fetchInstance(theInstanceId);
             if (instance.isPresent()) {
-                return JobOperationResultJson.newFailure(operationString, "Job instance <" + theInstanceId + "> was already cancelled. Nothing to do.");
+                return JobOperationResultJson.newFailure(operationString, messagePrefix + " was already cancelled. Nothing to do.");
             } else {
-                return JobOperationResultJson.newFailure(operationString, "Job instance <" + theInstanceId + "> not found.");
+                return JobOperationResultJson.newFailure(operationString, messagePrefix + " not found.");
             }
         }
     }
 
+    @Override
+    public void processCancelRequests() {
+        myTransactionService
+            .withSystemRequest()
+            .execute(() -> {
+                Query query = myEntityManager.createQuery(
+                    "UPDATE Batch2JobInstanceEntity b " +
+                    "set myStatus = ca.uhn.fhir.batch2.model.StatusEnum.CANCELLED " +
+                    "where myCancelled = true " +
+                    "AND myStatus IN (:states)");
+                query.setParameter("states", StatusEnum.CANCELLED.getPriorStates());
+                query.executeUpdate();
+            });
+    }
 }
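Together these two methods split cancellation in two: `cancelInstance` merely records the request by setting `myCancelled`, and the next maintenance pass applies every pending cancellation in one bulk, guarded UPDATE. Because the guard uses `StatusEnum.CANCELLED.getPriorStates()`, an already-finished job can never be flipped to CANCELLED. The flow, roughly (`instanceId` is a placeholder):

    // 1. API layer: record the request (no state transition happens here).
    myJobPersistence.cancelInstance(instanceId);

    // 2. Maintenance pass: apply all pending cancellations in bulk. Only rows
    //    whose status is a legal predecessor of CANCELLED are changed.
    myJobPersistence.processCancelRequests();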

@ -37,6 +37,10 @@ public interface IBatch2JobInstanceRepository extends JpaRepository<Batch2JobIns
     @Query("UPDATE Batch2JobInstanceEntity e SET e.myStatus = :status WHERE e.myId = :id and e.myStatus <> :status")
     int updateInstanceStatus(@Param("id") String theInstanceId, @Param("status") StatusEnum theStatus);
 
+    @Modifying
+    @Query("UPDATE Batch2JobInstanceEntity e SET e.myStatus = :status WHERE e.myId = :id and e.myStatus IN ( :prior_states )")
+    int updateInstanceStatusIfIn(@Param("id") String theInstanceId, @Param("status") StatusEnum theNewState, @Param("prior_states") Set<StatusEnum> thePriorStates);
+
     @Modifying
     @Query("UPDATE Batch2JobInstanceEntity e SET e.myUpdateTime = :updated WHERE e.myId = :id")
     int updateInstanceUpdateTime(@Param("id") String theInstanceId, @Param("updated") Date theUpdated);

@ -30,22 +30,29 @@ import org.springframework.data.repository.query.Param;
 import java.util.Collection;
 import java.util.Date;
 import java.util.List;
+import java.util.Set;
+import java.util.stream.Stream;
 
 public interface IBatch2WorkChunkRepository extends JpaRepository<Batch2WorkChunkEntity, String>, IHapiFhirJpaRepository {
 
-    @Query("SELECT e FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId ORDER BY e.mySequence ASC")
+    // NOTE we need a stable sort so paging is reliable.
+    // Warning: mySequence is not unique - it is reset for every chunk. So we also sort by myId.
+    @Query("SELECT e FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId ORDER BY e.mySequence ASC, e.myId ASC")
     List<Batch2WorkChunkEntity> fetchChunks(Pageable thePageRequest, @Param("instanceId") String theInstanceId);
 
+    /**
+     * Deprecated, use {@link ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository#fetchChunksForStep(String, String)}
+     */
+    @Deprecated
     @Query("SELECT e FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId AND e.myTargetStepId = :targetStepId ORDER BY e.mySequence ASC")
     List<Batch2WorkChunkEntity> fetchChunksForStep(Pageable thePageRequest, @Param("instanceId") String theInstanceId, @Param("targetStepId") String theTargetStepId);
 
+    /**
+     * A projection query to avoid fetching the CLOB over the wire.
+     * Otherwise, the same as fetchChunks.
+     */
+    @Query("SELECT new Batch2WorkChunkEntity(" +
+        "e.myId, e.mySequence, e.myJobDefinitionId, e.myJobDefinitionVersion, e.myInstanceId, e.myTargetStepId, e.myStatus," +
+        "e.myCreateTime, e.myStartTime, e.myUpdateTime, e.myEndTime," +
+        "e.myErrorMessage, e.myErrorCount, e.myRecordsProcessed" +
+        ") FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId ORDER BY e.mySequence ASC, e.myId ASC")
+    List<Batch2WorkChunkEntity> fetchChunksNoData(Pageable thePageRequest, @Param("instanceId") String theInstanceId);
 
     @Query("SELECT DISTINCT e.myStatus from Batch2WorkChunkEntity e where e.myInstanceId = :instanceId AND e.myTargetStepId = :stepId")
-    List<WorkChunkStatusEnum> getDistinctStatusesForStep(@Param("instanceId") String theInstanceId, @Param("stepId") String theStepId);
+    Set<WorkChunkStatusEnum> getDistinctStatusesForStep(@Param("instanceId") String theInstanceId, @Param("stepId") String theStepId);
 
+    @Query("SELECT e FROM Batch2WorkChunkEntity e WHERE e.myInstanceId = :instanceId AND e.myTargetStepId = :targetStepId ORDER BY e.mySequence ASC")
+    Stream<Batch2WorkChunkEntity> fetchChunksForStep(@Param("instanceId") String theInstanceId, @Param("targetStepId") String theTargetStepId);
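Two separate fixes land in this interface: the stable `(mySequence, myId)` sort that makes paging deterministic, and the constructor-projection query that skips the CLOB column (JPQL `SELECT new` maps positionally onto the matching 14-argument constructor added to the entity in the next hunk, so a mismatch fails when the query is parsed rather than at runtime). Note also that the new `Stream`-returning `fetchChunksForStep` must be consumed inside an open transaction, which is why its caller above is annotated `Propagation.MANDATORY`. A consumption sketch (`instanceId` and `stepId` are placeholders):

    // Streams returned by Spring Data hold a cursor; consume and close them
    // inside the surrounding transaction.
    try (Stream<Batch2WorkChunkEntity> chunks =
            myWorkChunkRepository.fetchChunksForStep(instanceId, stepId)) {
        chunks.forEach(chunk -> ourLog.trace("visiting chunk {}", chunk.getId()));
    }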

@ -195,6 +195,10 @@ public class Batch2JobInstanceEntity implements Serializable {
         myEndTime = theEndTime;
     }
 
+    public void setUpdateTime(Date theTime) {
+        myUpdateTime = theTime;
+    }
+
     public Date getUpdateTime() {
         return myUpdateTime;
     }
@ -97,6 +97,36 @@ public class Batch2WorkChunkEntity implements Serializable {
     @Column(name = "ERROR_COUNT", nullable = false)
     private int myErrorCount;
 
+    /**
+     * Default constructor for Hibernate.
+     */
+    public Batch2WorkChunkEntity() {
+    }
+
+    /**
+     * Projection constructor for the no-data path.
+     */
+    public Batch2WorkChunkEntity(String theId, int theSequence, String theJobDefinitionId, int theJobDefinitionVersion,
+                                 String theInstanceId, String theTargetStepId, WorkChunkStatusEnum theStatus,
+                                 Date theCreateTime, Date theStartTime, Date theUpdateTime, Date theEndTime,
+                                 String theErrorMessage, int theErrorCount, Integer theRecordsProcessed) {
+        myId = theId;
+        mySequence = theSequence;
+        myJobDefinitionId = theJobDefinitionId;
+        myJobDefinitionVersion = theJobDefinitionVersion;
+        myInstanceId = theInstanceId;
+        myTargetStepId = theTargetStepId;
+        myStatus = theStatus;
+        myCreateTime = theCreateTime;
+        myStartTime = theStartTime;
+        myUpdateTime = theUpdateTime;
+        myEndTime = theEndTime;
+        myErrorMessage = theErrorMessage;
+        myErrorCount = theErrorCount;
+        myRecordsProcessed = theRecordsProcessed;
+    }
+
     public int getErrorCount() {
         return myErrorCount;
     }

@ -241,4 +271,5 @@ public class Batch2WorkChunkEntity implements Serializable {
             .append("errorMessage", myErrorMessage)
             .toString();
     }
+
 }
@ -0,0 +1,30 @@
+package ca.uhn.fhir.jpa.batch2;
+
+import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
+import ca.uhn.fhir.test.utilities.RandomDataHelper;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class JobInstanceUtilTest {
+
+    /**
+     * Fill with random data and round-trip via instance.
+     */
+    @Test
+    void fromEntityToInstance() {
+        JobInstance instance = new JobInstance();
+        RandomDataHelper.fillFieldsRandomly(instance);
+
+        Batch2JobInstanceEntity entity = new Batch2JobInstanceEntity();
+        JobInstanceUtil.fromInstanceToEntity(instance, entity);
+        JobInstance instanceCopyBack = JobInstanceUtil.fromEntityToInstance(entity);
+
+        assertTrue(EqualsBuilder.reflectionEquals(instance, instanceCopyBack));
+    }
+}
@ -4,15 +4,11 @@ import ca.uhn.fhir.batch2.api.JobOperationResultJson;
 import ca.uhn.fhir.batch2.model.FetchJobInstancesRequest;
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.StatusEnum;
-import ca.uhn.fhir.batch2.model.WorkChunk;
 import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
 import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
 import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
 import ca.uhn.fhir.jpa.dao.tx.NonTransactionalHapiTransactionService;
 import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
-import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
-import ca.uhn.fhir.jpa.util.JobInstanceUtil;
-import ca.uhn.fhir.model.api.PagingIterator;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.ArgumentCaptor;

@ -20,27 +16,20 @@ import org.mockito.InjectMocks;
 import org.mockito.Mock;
 import org.mockito.Spy;
 import org.mockito.junit.jupiter.MockitoExtension;
 import org.springframework.data.domain.PageRequest;
 import org.springframework.data.domain.Pageable;
 import org.springframework.transaction.PlatformTransactionManager;
 
-import java.util.ArrayList;
+import java.time.LocalDate;
+import java.time.ZoneOffset;
 import java.util.Arrays;
 import java.util.Date;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Optional;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@ -123,7 +112,7 @@ class JpaJobPersistenceImplTest {
 
         // verify
         verify(myWorkChunkRepository)
-            .deleteAllForInstance(eq(jobId));
+            .deleteAllForInstance(jobId);
     }
 
     @Test
@ -136,123 +125,9 @@ class JpaJobPersistenceImplTest {
 
         // verify
         verify(myWorkChunkRepository)
-            .deleteAllForInstance(eq(jobid));
+            .deleteAllForInstance(jobid);
         verify(myJobInstanceRepository)
-            .deleteById(eq(jobid));
-    }
-
-    @Test
-    public void updateInstance_withInstance_checksInstanceExistsAndCallsSave() {
-        // setup
-        JobInstance toSave = createJobInstanceWithDemoData();
-        Batch2JobInstanceEntity entity = new Batch2JobInstanceEntity();
-        entity.setId(toSave.getInstanceId());
-
-        // when
-        when(myJobInstanceRepository.findById(eq(toSave.getInstanceId())))
-            .thenReturn(Optional.of(entity));
-
-        // test
-        mySvc.updateInstance(toSave);
-
-        // verify
-        ArgumentCaptor<Batch2JobInstanceEntity> entityCaptor = ArgumentCaptor.forClass(Batch2JobInstanceEntity.class);
-        verify(myJobInstanceRepository)
-            .save(entityCaptor.capture());
-        Batch2JobInstanceEntity saved = entityCaptor.getValue();
-        assertEquals(toSave.getInstanceId(), saved.getId());
-        assertEquals(toSave.getStatus(), saved.getStatus());
-        assertEquals(toSave.getStartTime(), entity.getStartTime());
-        assertEquals(toSave.getEndTime(), entity.getEndTime());
-        assertEquals(toSave.isCancelled(), entity.isCancelled());
-        assertEquals(toSave.getCombinedRecordsProcessed(), entity.getCombinedRecordsProcessed());
-        assertEquals(toSave.getCombinedRecordsProcessedPerSecond(), entity.getCombinedRecordsProcessedPerSecond());
-        assertEquals(toSave.getTotalElapsedMillis(), entity.getTotalElapsedMillis());
-        assertEquals(toSave.isWorkChunksPurged(), entity.getWorkChunksPurged());
-        assertEquals(toSave.getProgress(), entity.getProgress());
-        assertEquals(toSave.getErrorMessage(), entity.getErrorMessage());
-        assertEquals(toSave.getErrorCount(), entity.getErrorCount());
-        assertEquals(toSave.getEstimatedTimeRemaining(), entity.getEstimatedTimeRemaining());
-        assertEquals(toSave.getCurrentGatedStepId(), entity.getCurrentGatedStepId());
-        assertEquals(toSave.getReport(), entity.getReport());
-    }
-
-    @Test
-    public void updateInstance_invalidId_throwsIllegalArgumentException() {
-        // setup
-        JobInstance instance = createJobInstanceWithDemoData();
-
-        // when
-        when(myJobInstanceRepository.findById(anyString()))
-            .thenReturn(Optional.empty());
-
-        // test
-        try {
-            mySvc.updateInstance(instance);
-            fail();
-        } catch (IllegalArgumentException ex) {
-            assertTrue(ex.getMessage().contains("Unknown instance ID: " + instance.getInstanceId()));
-        }
-    }
-
-    @Test
-    public void fetchAllWorkChunksIterator_withValidIdAndBoolToSayToIncludeData_returnsPagingIterator() {
-        // setup
-        String instanceId = "instanceId";
-        String jobDefinition = "definitionId";
-        int version = 1;
-        String targetStep = "step";
-
-        List<Batch2WorkChunkEntity> workChunkEntityList = new ArrayList<>();
-        Batch2WorkChunkEntity chunk1 = new Batch2WorkChunkEntity();
-        chunk1.setId("id1");
-        chunk1.setJobDefinitionVersion(version);
-        chunk1.setJobDefinitionId(jobDefinition);
-        chunk1.setSerializedData("serialized data 1");
-        chunk1.setTargetStepId(targetStep);
-        workChunkEntityList.add(chunk1);
-        Batch2WorkChunkEntity chunk2 = new Batch2WorkChunkEntity();
-        chunk2.setId("id2");
-        chunk2.setSerializedData("serialized data 2");
-        chunk2.setJobDefinitionId(jobDefinition);
-        chunk2.setJobDefinitionVersion(version);
-        chunk2.setTargetStepId(targetStep);
-        workChunkEntityList.add(chunk2);
-
-        for (boolean includeData : new boolean[] { true, false }) {
-            // when
-            when(myWorkChunkRepository.fetchChunks(any(PageRequest.class), eq(instanceId)))
-                .thenReturn(workChunkEntityList);
-
-            // test
-            Iterator<WorkChunk> chunkIterator = mySvc.fetchAllWorkChunksIterator(instanceId, includeData);
-
-            // verify
-            assertTrue(chunkIterator instanceof PagingIterator);
-            verify(myWorkChunkRepository, never())
-                .fetchChunks(any(PageRequest.class), anyString());
-
-            // now try the iterator out...
-            WorkChunk chunk = chunkIterator.next();
-            assertEquals(chunk1.getId(), chunk.getId());
-            if (includeData) {
-                assertEquals(chunk1.getSerializedData(), chunk.getData());
-            } else {
-                assertNull(chunk.getData());
-            }
-            chunk = chunkIterator.next();
-            assertEquals(chunk2.getId(), chunk.getId());
-            if (includeData) {
-                assertEquals(chunk2.getSerializedData(), chunk.getData());
-            } else {
-                assertNull(chunk.getData());
-            }
-
-            verify(myWorkChunkRepository)
-                .fetchChunks(any(PageRequest.class), eq(instanceId));
-
-            reset(myWorkChunkRepository);
-        }
+            .deleteById(jobid);
     }
 
     @Test
@ -301,7 +176,7 @@ class JpaJobPersistenceImplTest {
         JobInstance instance = createJobInstanceFromEntity(entity);
 
         // when
-        when(myJobInstanceRepository.findById(eq(instance.getInstanceId())))
+        when(myJobInstanceRepository.findById(instance.getInstanceId()))
             .thenReturn(Optional.of(entity));
 
         // test
@ -312,10 +187,6 @@ class JpaJobPersistenceImplTest {
         assertEquals(instance.getInstanceId(), retInstance.get().getInstanceId());
     }
 
-    private JobInstance createJobInstanceWithDemoData() {
-        return createJobInstanceFromEntity(createBatch2JobInstanceEntity());
-    }
-
     private JobInstance createJobInstanceFromEntity(Batch2JobInstanceEntity theEntity) {
         return JobInstanceUtil.fromEntityToInstance(theEntity);
     }
@ -323,8 +194,8 @@ class JpaJobPersistenceImplTest {
     private Batch2JobInstanceEntity createBatch2JobInstanceEntity() {
         Batch2JobInstanceEntity entity = new Batch2JobInstanceEntity();
         entity.setId("id");
-        entity.setStartTime(new Date(2000, 1, 2));
-        entity.setEndTime(new Date(2000, 2, 3));
+        entity.setStartTime(Date.from(LocalDate.of(2000, 1, 2).atStartOfDay().toInstant(ZoneOffset.UTC)));
+        entity.setEndTime(Date.from(LocalDate.of(2000, 2, 3).atStartOfDay().toInstant(ZoneOffset.UTC)));
         entity.setStatus(StatusEnum.COMPLETED);
         entity.setCancelled(true);
         entity.setFastTracking(true);
@ -1,22 +1,17 @@
 package ca.uhn.fhir.jpa.search;
 
 import ca.uhn.fhir.jpa.dao.tx.HapiTransactionService;
-import ca.uhn.fhir.rest.api.server.RequestDetails;
-import ca.uhn.fhir.rest.api.server.storage.TransactionDetails;
-import org.springframework.transaction.annotation.Isolation;
-import org.springframework.transaction.annotation.Propagation;
+import org.springframework.transaction.support.SimpleTransactionStatus;
 import org.springframework.transaction.support.TransactionCallback;
 
-import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
-import java.util.concurrent.Callable;
 
 public class MockHapiTransactionService extends HapiTransactionService {
 
     @Nullable
     @Override
     protected <T> T doExecute(ExecutionBuilder theExecutionBuilder, TransactionCallback<T> theCallback) {
-        return theCallback.doInTransaction(null);
+        return theCallback.doInTransaction(new SimpleTransactionStatus());
     }
 }
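Returning a real `SimpleTransactionStatus` instead of `null` matters for any callback that inspects its `TransactionStatus`; under the old mock, perfectly legal production code like the following hypothetical callback would have thrown a `NullPointerException` in tests:

    // Hypothetical callback: marks the transaction rollback-only instead of throwing.
    TransactionCallback<String> callback = status -> {
        status.setRollbackOnly(); // NPE when the mock passed null
        return "done";
    };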

@ -432,6 +432,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
         JobInstanceStartRequest request = buildRequest(jobDefId);
 
         // execute
+        ourLog.info("Starting job");
         myFirstStepLatch.setExpectedCount(1);
         Batch2JobStartResponse startResponse = myJobCoordinator.startInstance(request);
         String instanceId = startResponse.getInstanceId();

@ -441,7 +442,9 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
         myBatch2JobHelper.awaitJobInProgress(instanceId);
 
         // execute
+        ourLog.info("Cancel job {}", instanceId);
         myJobCoordinator.cancelInstance(instanceId);
+        ourLog.info("Cancel job {} done", instanceId);
 
         // validate
         myBatch2JobHelper.awaitJobCancelled(instanceId);
@ -488,7 +491,7 @@ public class Batch2CoordinatorIT extends BaseJpaR4Test {
         myFirstStepLatch.setExpectedCount(1);
         Batch2JobStartResponse response = myJobCoordinator.startInstance(request);
         JobInstance instance = myBatch2JobHelper.awaitJobHasStatus(response.getInstanceId(),
-            12, // we want to wait a long time (2 min here) because backoff is incremental
+            30, // we want to wait a long time (2 min here) because backoff is incremental
             StatusEnum.FAILED
         );
@ -0,0 +1,62 @@
+package ca.uhn.fhir.jpa.batch2;
+
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
+import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
+import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.util.Arrays;
+import java.util.Date;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class Batch2JobInstanceRepositoryTest extends BaseJpaR4Test {
+
+    @Autowired
+    IBatch2JobInstanceRepository myBatch2JobInstanceRepository;
+
+    @ParameterizedTest
+    @CsvSource({
+        "QUEUED, FAILED, QUEUED, true, normal transition",
+        "IN_PROGRESS, FAILED, QUEUED IN_PROGRESS ERRORED, true, normal transition with multiple prior",
+        "IN_PROGRESS, IN_PROGRESS, IN_PROGRESS, true, self transition to same state",
+        "QUEUED, QUEUED, QUEUED, true, normal transition",
+        "QUEUED, FAILED, IN_PROGRESS, false, blocked transition"
+    })
+    void updateInstance_toState_fromState_whenAllowed(StatusEnum theCurrentState, StatusEnum theTargetState, String theAllowedPriorStatesString, boolean theExpectedSuccessFlag, String theMessage) {
+        Set<StatusEnum> theAllowedPriorStates = Arrays.stream(theAllowedPriorStatesString.trim().split(" +")).map(StatusEnum::valueOf).collect(Collectors.toSet());
+        // given
+        Batch2JobInstanceEntity entity = new Batch2JobInstanceEntity();
+        String jobId = UUID.randomUUID().toString();
+        entity.setId(jobId);
+        entity.setStatus(theCurrentState);
+        entity.setCreateTime(new Date());
+        entity.setDefinitionId("definition_id");
+        myBatch2JobInstanceRepository.save(entity);
+
+        // when
+        int changeCount =
+            runInTransaction(() ->
+                myBatch2JobInstanceRepository.updateInstanceStatusIfIn(jobId, theTargetState, theAllowedPriorStates));
+
+        // then
+        Batch2JobInstanceEntity readBack = runInTransaction(() ->
+            myBatch2JobInstanceRepository.findById(jobId).orElseThrow());
+        if (theExpectedSuccessFlag) {
+            assertEquals(1, changeCount, "The change happened");
+            assertEquals(theTargetState, readBack.getStatus());
+        } else {
+            assertEquals(0, changeCount, "The change did not happen");
+            assertEquals(theCurrentState, readBack.getStatus());
+        }
+    }
+}
@ -0,0 +1,552 @@
+package ca.uhn.fhir.jpa.batch2;
+
+import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
+import ca.uhn.fhir.batch2.api.IJobPersistence;
+import ca.uhn.fhir.batch2.api.IJobStepWorker;
+import ca.uhn.fhir.batch2.api.RunOutcome;
+import ca.uhn.fhir.batch2.api.VoidModel;
+import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
+import ca.uhn.fhir.batch2.maintenance.JobMaintenanceServiceImpl;
+import ca.uhn.fhir.batch2.model.JobDefinition;
+import ca.uhn.fhir.batch2.model.JobInstance;
+import ca.uhn.fhir.batch2.model.JobWorkNotification;
+import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
+import ca.uhn.fhir.interceptor.api.HookParams;
+import ca.uhn.fhir.jpa.dao.data.IBatch2JobInstanceRepository;
+import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
+import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
+import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
+import ca.uhn.fhir.jpa.subscription.channel.api.ChannelConsumerSettings;
+import ca.uhn.fhir.jpa.subscription.channel.api.ChannelProducerSettings;
+import ca.uhn.fhir.jpa.subscription.channel.api.IChannelFactory;
+import ca.uhn.fhir.jpa.subscription.channel.impl.LinkedBlockingChannel;
+import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
+import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.util.JsonUtil;
+import ca.uhn.test.concurrency.IPointcutLatch;
+import ca.uhn.test.concurrency.PointcutLatch;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.messaging.Message;
+import org.springframework.messaging.MessageChannel;
+import org.springframework.messaging.support.ChannelInterceptor;
+import org.springframework.transaction.support.TransactionTemplate;
+
+import javax.annotation.Nonnull;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
+
+import static ca.uhn.fhir.batch2.config.BaseBatch2Config.CHANNEL_NAME;
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.hasSize;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class Batch2JobMaintenanceDatabaseIT extends BaseJpaR4Test {
+    private static final Logger ourLog = LoggerFactory.getLogger(Batch2JobMaintenanceDatabaseIT.class);
+
+    public static final int TEST_JOB_VERSION = 1;
+    public static final String FIRST = "FIRST";
+    public static final String SECOND = "SECOND";
+    public static final String LAST = "LAST";
+    private static final String JOB_DEF_ID = "test-job-definition";
+    private static final JobDefinition<? extends IModelJson> ourJobDef = buildJobDefinition();
+    private static final String TEST_INSTANCE_ID = "test-instance-id";
+
+    @Autowired
+    JobDefinitionRegistry myJobDefinitionRegistry;
+    @Autowired
+    IJobMaintenanceService myJobMaintenanceService;
+    @Autowired
+    private IChannelFactory myChannelFactory;
+
+    @Autowired
+    IJobPersistence myJobPersistence;
+    @Autowired
+    IBatch2JobInstanceRepository myJobInstanceRepository;
+    @Autowired
+    IBatch2WorkChunkRepository myWorkChunkRepository;
+
+    private LinkedBlockingChannel myWorkChannel;
+    private final List<StackTraceElement[]> myStackTraceElements = new ArrayList<>();
+    private TransactionTemplate myTxTemplate;
+    private final MyChannelInterceptor myChannelInterceptor = new MyChannelInterceptor();
+
+    @BeforeEach
+    public void before() {
+        myWorkChunkRepository.deleteAll();
+        myJobInstanceRepository.deleteAll();
+
+        myJobDefinitionRegistry.addJobDefinition(ourJobDef);
+        myWorkChannel = (LinkedBlockingChannel) myChannelFactory.getOrCreateProducer(CHANNEL_NAME, JobWorkNotificationJsonMessage.class, new ChannelProducerSettings());
+        JobMaintenanceServiceImpl jobMaintenanceService = (JobMaintenanceServiceImpl) myJobMaintenanceService;
+        jobMaintenanceService.setMaintenanceJobStartedCallback(() -> {
+            ourLog.info("Batch maintenance job started");
+            myStackTraceElements.add(Thread.currentThread().getStackTrace());
+        });
+
+        myTxTemplate = new TransactionTemplate(myTxManager);
+        storeNewInstance(ourJobDef);
+
+        myWorkChannel = (LinkedBlockingChannel) myChannelFactory.getOrCreateReceiver(CHANNEL_NAME, JobWorkNotificationJsonMessage.class, new ChannelConsumerSettings());
+        myChannelInterceptor.clear();
+        myWorkChannel.addInterceptor(myChannelInterceptor);
+    }
+
+    @AfterEach
+    public void after() {
+        ourLog.debug("Maintenance traces: {}", myStackTraceElements);
+        myWorkChannel.clearInterceptorsForUnitTest();
+        JobMaintenanceServiceImpl jobMaintenanceService = (JobMaintenanceServiceImpl) myJobMaintenanceService;
+        jobMaintenanceService.setMaintenanceJobStartedCallback(() -> {
+        });
+    }
+
+    @Test
+    public void runMaintenancePass_noChunks_noChange() {
+        assertInstanceCount(1);
+        myJobMaintenanceService.runMaintenancePass();
+        assertInstanceCount(1);
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+    }
+
+    @Test
+    public void runMaintenancePass_SingleQueuedChunk_noChange() {
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            "chunk1, FIRST, QUEUED",
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+    }
+
+    @Test
+    public void runMaintenancePass_SingleInProgressChunk_noChange() {
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            "chunk1, FIRST, IN_PROGRESS",
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+    }
+
+    @Test
+    public void runMaintenancePass_SingleCompleteChunk_notifiesAndChangesGatedStep() throws InterruptedException {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk2, SECOND, QUEUED
+            """,
+            """
+            chunk2
+            """
+        );
+
+        expectation.storeChunks();
+        myChannelInterceptor.setExpectedCount(1);
+        myJobMaintenanceService.runMaintenancePass();
+        myChannelInterceptor.awaitExpected();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+        assertCurrentGatedStep(SECOND);
+    }
+
+    @Test
+    public void runMaintenancePass_DoubleCompleteChunk_notifiesAndChangesGatedStep() throws InterruptedException {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk2, FIRST, COMPLETED
+            chunk3, SECOND, QUEUED
+            chunk4, SECOND, QUEUED
+            """, """
+            chunk3
+            chunk4
+            """
+        );
+
+        expectation.storeChunks();
+        myChannelInterceptor.setExpectedCount(2);
+        myJobMaintenanceService.runMaintenancePass();
+        myChannelInterceptor.awaitExpected();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+        assertCurrentGatedStep(SECOND);
+    }
+
+    @Test
+    public void runMaintenancePass_DoubleIncompleteChunk_noChange() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk2, FIRST, IN_PROGRESS
+            chunk3, SECOND, QUEUED
+            chunk4, SECOND, QUEUED
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.IN_PROGRESS);
+        assertCurrentGatedStep(FIRST);
+    }
+
+    @Test
+    public void runMaintenancePass_allStepsComplete_jobCompletes() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk3, SECOND, COMPLETED
+            chunk4, SECOND, COMPLETED
+            chunk5, SECOND, COMPLETED
+            chunk6, SECOND, COMPLETED
+            chunk7, LAST, COMPLETED
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+
+        myJobMaintenanceService.runMaintenancePass();
+
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.COMPLETED);
+    }
+
+    /**
+     * If the first step doesn't produce any work chunks, then
+     * the instance should be marked as complete right away.
+     */
+    @Test
+    public void testPerformStep_FirstStep_NoWorkChunksProduced() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+
+        myJobMaintenanceService.runMaintenancePass();
+        myJobMaintenanceService.runMaintenancePass();
+        myJobMaintenanceService.runMaintenancePass();
+        myJobMaintenanceService.runMaintenancePass();
+
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.COMPLETED);
+    }
+
+    /**
+     * Once all stored chunks are complete, the job should complete
+     * even if an intermediate step produced no work chunks of its own;
+     * the instance should be marked as complete right away.
+     */
+    @Test
+    public void testPerformStep_secondStep_NoWorkChunksProduced() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk3, SECOND, COMPLETED
+            chunk4, SECOND, COMPLETED
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+
+        myJobMaintenanceService.runMaintenancePass();
+        myJobMaintenanceService.runMaintenancePass();
+        myJobMaintenanceService.runMaintenancePass();
+
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.COMPLETED);
+    }
+
+    // TODO MB Ken and Nathan created these. Do we want to make them real?
+    @Test
+    @Disabled("future plans")
+    public void runMaintenancePass_MultipleStepsInProgress_CancelsInstance() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, IN_PROGRESS
+            chunk2, SECOND, IN_PROGRESS
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.FAILED);
+        assertError("IN_PROGRESS Chunks found in both the FIRST and SECOND step.");
+    }
+
+    @Test
+    @Disabled("future plans")
+    public void runMaintenancePass_MultipleOtherStepsInProgress_CancelsInstance() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, SECOND, IN_PROGRESS
+            chunk2, LAST, IN_PROGRESS
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.FAILED);
+        assertError("IN_PROGRESS Chunks found in both the SECOND and LAST step.");
+    }
+
+    @Test
+    @Disabled("future plans")
+    public void runMaintenancePass_MultipleStepsQueued_CancelsInstance() {
+        assertCurrentGatedStep(FIRST);
+
+        WorkChunkExpectation expectation = new WorkChunkExpectation(
+            """
+            chunk1, FIRST, COMPLETED
+            chunk2, SECOND, QUEUED
+            chunk3, LAST, QUEUED
+            """,
+            ""
+        );
+
+        expectation.storeChunks();
+        myJobMaintenanceService.runMaintenancePass();
+        expectation.assertNotifications();
+
+        assertInstanceStatus(StatusEnum.FAILED);
+        assertError("QUEUED Chunks found in both the SECOND and LAST step.");
+    }
+
+    private void assertError(String theExpectedErrorMessage) {
+        Optional<Batch2JobInstanceEntity> instance = myJobInstanceRepository.findById(TEST_INSTANCE_ID);
+        assertTrue(instance.isPresent());
+        assertEquals(theExpectedErrorMessage, instance.get().getErrorMessage());
+    }
+
+    private void assertCurrentGatedStep(String theNextStepId) {
+        Optional<JobInstance> instance = myJobPersistence.fetchInstance(TEST_INSTANCE_ID);
+        assertTrue(instance.isPresent());
+        assertEquals(theNextStepId, instance.get().getCurrentGatedStepId());
+    }
+
+    @Nonnull
+    private static Batch2WorkChunkEntity buildWorkChunkEntity(String theChunkId, String theStepId, WorkChunkStatusEnum theStatus) {
+        Batch2WorkChunkEntity workChunk = new Batch2WorkChunkEntity();
+        workChunk.setId(theChunkId);
+        workChunk.setJobDefinitionId(JOB_DEF_ID);
+        workChunk.setStatus(theStatus);
+        workChunk.setJobDefinitionVersion(TEST_JOB_VERSION);
+        workChunk.setCreateTime(new Date());
+        workChunk.setInstanceId(TEST_INSTANCE_ID);
+        workChunk.setTargetStepId(theStepId);
+        if (!theStatus.isIncomplete()) {
+            workChunk.setEndTime(new Date());
+        }
+
+        return workChunk;
+    }
+
+    @Nonnull
+    private static JobDefinition<? extends IModelJson> buildJobDefinition() {
+        IJobStepWorker<TestJobParameters, VoidModel, FirstStepOutput> firstStep = (step, sink) -> {
+            ourLog.info("First step for chunk {}", step.getChunkId());
+            return RunOutcome.SUCCESS;
+        };
+        IJobStepWorker<TestJobParameters, FirstStepOutput, SecondStepOutput> secondStep = (step, sink) -> {
+            ourLog.info("Second step for chunk {}", step.getChunkId());
+            return RunOutcome.SUCCESS;
+        };
+        IJobStepWorker<TestJobParameters, SecondStepOutput, VoidModel> lastStep = (step, sink) -> {
+            ourLog.info("Last step for chunk {}", step.getChunkId());
+            return RunOutcome.SUCCESS;
+        };
+
+        JobDefinition<? extends IModelJson> definition = buildGatedJobDefinition(firstStep, secondStep, lastStep);
+        return definition;
+    }
+
+    private void storeNewInstance(JobDefinition<? extends IModelJson> theJobDefinition) {
+        Batch2JobInstanceEntity entity = new Batch2JobInstanceEntity();
+        entity.setId(TEST_INSTANCE_ID);
+        entity.setStatus(StatusEnum.IN_PROGRESS);
+        entity.setDefinitionId(theJobDefinition.getJobDefinitionId());
+        entity.setDefinitionVersion(theJobDefinition.getJobDefinitionVersion());
+        entity.setParams(JsonUtil.serializeOrInvalidRequest(new TestJobParameters()));
+        entity.setCurrentGatedStepId(FIRST);
+        entity.setCreateTime(new Date());
+
+        myTxTemplate.executeWithoutResult(t -> myJobInstanceRepository.save(entity));
+    }
+
+    private void assertInstanceCount(int size) {
+        assertThat(myJobPersistence.fetchInstancesByJobDefinitionId(JOB_DEF_ID, 100, 0), hasSize(size));
+    }
+
+    private void assertInstanceStatus(StatusEnum theExpectedStatus) {
+        Optional<Batch2JobInstanceEntity> instance = myJobInstanceRepository.findById(TEST_INSTANCE_ID);
+        assertTrue(instance.isPresent());
+        assertEquals(theExpectedStatus, instance.get().getStatus());
+    }
+
@Nonnull
|
||||
private static JobDefinition<? extends IModelJson> buildGatedJobDefinition(IJobStepWorker<TestJobParameters, VoidModel, FirstStepOutput> theFirstStep, IJobStepWorker<TestJobParameters, FirstStepOutput, SecondStepOutput> theSecondStep, IJobStepWorker<TestJobParameters, SecondStepOutput, VoidModel> theLastStep) {
|
||||
return JobDefinition.newBuilder()
|
||||
.setJobDefinitionId(JOB_DEF_ID)
|
||||
.setJobDescription("test job")
|
||||
.setJobDefinitionVersion(TEST_JOB_VERSION)
|
||||
.setParametersType(TestJobParameters.class)
|
||||
.gatedExecution()
|
||||
.addFirstStep(
|
||||
FIRST,
|
||||
"Test first step",
|
||||
FirstStepOutput.class,
|
||||
theFirstStep
|
||||
)
|
||||
.addIntermediateStep(
|
||||
SECOND,
|
||||
"Test second step",
|
||||
SecondStepOutput.class,
|
||||
theSecondStep
|
||||
)
|
||||
.addLastStep(
|
||||
LAST,
|
||||
"Test last step",
|
||||
theLastStep
|
||||
)
|
||||
.completionHandler(details -> {
|
||||
})
|
||||
.build();
|
||||
}
|
||||
|
||||
static class TestJobParameters implements IModelJson {
|
||||
TestJobParameters() {
|
||||
}
|
||||
}
|
||||
|
||||
static class FirstStepOutput implements IModelJson {
|
||||
FirstStepOutput() {
|
||||
}
|
||||
}
|
||||
|
||||
static class SecondStepOutput implements IModelJson {
|
||||
SecondStepOutput() {
|
||||
}
|
||||
}
|
||||
|
||||
private class WorkChunkExpectation {
|
||||
private final List<Batch2WorkChunkEntity> myInputChunks = new ArrayList<>();
|
||||
private final List<String> myExpectedChunkIdNotifications = new ArrayList<>();
|
||||
public WorkChunkExpectation(String theInput, String theOutputChunkIds) {
|
||||
String[] inputLines = theInput.split("\n");
|
||||
for (String next : inputLines) {
|
||||
String[] parts = next.split(",");
|
||||
Batch2WorkChunkEntity e = buildWorkChunkEntity(parts[0].trim(), parts[1].trim(), WorkChunkStatusEnum.valueOf(parts[2].trim()));
|
||||
myInputChunks.add(e);
|
||||
}
|
||||
if (!isBlank(theOutputChunkIds)) {
|
||||
String[] outputLines = theOutputChunkIds.split("\n");
|
||||
for (String next : outputLines) {
|
||||
myExpectedChunkIdNotifications.add(next.trim());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void storeChunks() {
|
||||
myTxTemplate.executeWithoutResult(t -> myWorkChunkRepository.saveAll(myInputChunks));
|
||||
}
|
||||
|
||||
public void assertNotifications() {
|
||||
assertThat(myChannelInterceptor.getReceivedChunkIds(), containsInAnyOrder(myExpectedChunkIdNotifications.toArray()));
|
||||
}
|
||||
}
|
||||
|
||||
private static class MyChannelInterceptor implements ChannelInterceptor, IPointcutLatch {
|
||||
PointcutLatch myPointcutLatch = new PointcutLatch("BATCH CHUNK MESSAGE RECEIVED");
|
||||
List<String> myReceivedChunkIds = new ArrayList<>();
|
||||
@Override
|
||||
public Message<?> preSend(@Nonnull Message<?> message, @Nonnull MessageChannel channel) {
|
||||
ourLog.info("Sending message: {}", message);
|
||||
JobWorkNotification notification = ((JobWorkNotificationJsonMessage) message).getPayload();
|
||||
myReceivedChunkIds.add(notification.getChunkId());
|
||||
myPointcutLatch.call(message);
|
||||
return message;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
myPointcutLatch.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setExpectedCount(int count) {
|
||||
myPointcutLatch.setExpectedCount(count);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<HookParams> awaitExpected() throws InterruptedException {
|
||||
return myPointcutLatch.awaitExpected();
|
||||
}
|
||||
|
||||
List<String> getReceivedChunkIds() {
|
||||
return myReceivedChunkIds;
|
||||
}
|
||||
}
|
||||
}
|
|
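The WorkChunkExpectation helper above takes a newline-separated spec of chunks to seed ("chunkId, stepId, STATUS" per line) plus the chunk IDs expected to be notified on the work channel. A minimal usage sketch - the IDs, step names, and the expectation that only the QUEUED chunk is notified are illustrative assumptions, not taken from this commit:

	WorkChunkExpectation expectation = new WorkChunkExpectation(
		// chunkId, stepId, status - parsed by splitting on newlines and commas
		"chunk-a, step-1, COMPLETED\n" +
		"chunk-b, step-2, QUEUED",
		// chunk IDs we expect to see sent to the work channel
		"chunk-b");
	expectation.storeChunks();
	// ... trigger a maintenance pass here ...
	expectation.assertNotifications();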
@@ -49,11 +49,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 * {@link ca.uhn.fhir.jpa.batch2.JpaJobPersistenceImpl#onWorkChunkCreate}
 * {@link JpaJobPersistenceImpl#onWorkChunkDequeue(String)}
 * Chunk execution {@link ca.uhn.fhir.batch2.coordinator.StepExecutor#executeStep}
 * wipmb figure this out
 state transition triggers.
 on-enter actions
 on-exit actions
 activities in state
 */
@TestPropertySource(properties = {
	UnregisterScheduledProcessor.SCHEDULING_DISABLED_EQUALS_FALSE
@@ -1,7 +1,8 @@
-package ca.uhn.fhir.jpa.bulk;
+package ca.uhn.fhir.jpa.batch2;

import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
import ca.uhn.fhir.jpa.api.model.BulkExportJobResults;
import ca.uhn.fhir.jpa.api.model.BulkExportParameters;
import ca.uhn.fhir.jpa.api.svc.IBatch2JobRunner;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.provider.BaseResourceProviderR4Test;

@@ -18,6 +19,7 @@ import org.hl7.fhir.r4.model.Group;
import org.hl7.fhir.r4.model.IdType;
import org.hl7.fhir.r4.model.Patient;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
@@ -58,8 +60,14 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
	@Autowired
	private IBatch2JobRunner myJobRunner;

	@BeforeEach
	void beforeEach() {
		afterPurgeDatabase();
	}

	@AfterEach
	void afterEach() {
		ourLog.info("BulkDataErrorAbuseTest.afterEach()");
		myStorageSettings.setIndexMissingFields(JpaStorageSettings.IndexEnabledEnum.DISABLED);
	}
@@ -69,7 +77,7 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
	}

	/**
-	 * This test is disabled because it never actually exists. Run it if you want to ensure
+	 * This test is disabled because it never actually exits. Run it if you want to ensure
	 * that changes to the Bulk Export Batch2 task haven't affected our ability to successfully
	 * run endless parallel jobs. If you run it for a few minutes, and it never stops on its own,
	 * you are good.
@@ -146,8 +154,8 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
	}));

	// Don't let the list of futures grow so big we run out of memory
-	if (futures.size() > 200) {
-		while (futures.size() > 100) {
+	if (futures.size() > 1000) {
+		while (futures.size() > 500) {
			// This should always return true, but it'll throw an exception if we failed
			assertTrue(futures.remove(0).get());
		}
@@ -156,6 +164,12 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {

	ourLog.info("Done creating tasks, waiting for task completion");

	// wait for completion to avoid stranding background tasks.
	executorService.shutdown();
	assertTrue(executorService.awaitTermination(60, TimeUnit.SECONDS), "Finished before timeout");

	// verify that all requests succeeded
	ourLog.info("All tasks complete. Verify results.");
	for (var next : futures) {
		// This should always return true, but it'll throw an exception if we failed
		assertTrue(next.get());
@@ -216,7 +230,9 @@ public class BulkDataErrorAbuseTest extends BaseResourceProviderR4Test {
	}

	private String startJob(BulkDataExportOptions theOptions) {
-		Batch2JobStartResponse startResponse = myJobRunner.startNewJob(BulkExportUtils.createBulkExportJobParametersFromExportOptions(theOptions));
+		BulkExportParameters startRequest = BulkExportUtils.createBulkExportJobParametersFromExportOptions(theOptions);
+		startRequest.setUseExistingJobsFirst(false);
+		Batch2JobStartResponse startResponse = myJobRunner.startNewJob(startRequest);
		assertNotNull(startResponse);
		return startResponse.getInstanceId();
	}
@@ -15,9 +15,9 @@ import ca.uhn.fhir.jpa.dao.data.IBatch2WorkChunkRepository;
import ca.uhn.fhir.jpa.entity.Batch2JobInstanceEntity;
import ca.uhn.fhir.jpa.entity.Batch2WorkChunkEntity;
import ca.uhn.fhir.jpa.test.BaseJpaR4Test;
import ca.uhn.fhir.jpa.util.JobInstanceUtil;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.hapi.fhir.batch2.test.AbstractIJobPersistenceSpecificationTest;
import com.google.common.collect.Iterators;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

@@ -30,8 +30,10 @@ import org.springframework.data.domain.PageRequest;
import org.springframework.transaction.PlatformTransactionManager;

import javax.annotation.Nonnull;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;

@@ -62,7 +64,6 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
	public static final int JOB_DEF_VER = 1;
	public static final int SEQUENCE_NUMBER = 1;
	public static final String CHUNK_DATA = "{\"key\":\"value\"}";
	public static final String INSTANCE_ID = "instance-id";

	@Autowired
	private IJobPersistence mySvc;
@@ -150,8 +151,8 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {

	final String completedId = storeJobInstanceAndUpdateWithEndTime(StatusEnum.COMPLETED, 1);
	final String failedId = storeJobInstanceAndUpdateWithEndTime(StatusEnum.FAILED, 1);
-	final String erroredId = storeJobInstanceAndUpdateWithEndTime(StatusEnum.ERRORED, 1);
	final String cancelledId = storeJobInstanceAndUpdateWithEndTime(StatusEnum.CANCELLED, 1);
+	storeJobInstanceAndUpdateWithEndTime(StatusEnum.ERRORED, 1);
	storeJobInstanceAndUpdateWithEndTime(StatusEnum.QUEUED, 1);
	storeJobInstanceAndUpdateWithEndTime(StatusEnum.IN_PROGRESS, 1);
	storeJobInstanceAndUpdateWithEndTime(StatusEnum.FINALIZE, 1);

@@ -165,7 +166,7 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
	final List<JobInstance> jobInstancesByCutoff =
		mySvc.fetchInstances(JOB_DEFINITION_ID, StatusEnum.getEndedStatuses(), cutoffDate, PageRequest.of(0, 100));

-	assertEquals(Set.of(completedId, failedId, erroredId, cancelledId),
+	assertEquals(Set.of(completedId, failedId, cancelledId),
		jobInstancesByCutoff.stream()
			.map(JobInstance::getInstanceId)
			.collect(Collectors.toUnmodifiableSet()));
@@ -251,14 +252,8 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
	JobInstance instance = createInstance();
	String instanceId = mySvc.storeNewInstance(instance);

	runInTransaction(() -> {
		Batch2JobInstanceEntity instanceEntity = myJobInstanceRepository.findById(instanceId).orElseThrow(IllegalStateException::new);
		assertEquals(StatusEnum.QUEUED, instanceEntity.getStatus());
		instanceEntity.setCancelled(true);
		myJobInstanceRepository.save(instanceEntity);
	});

	JobOperationResultJson result = mySvc.cancelInstance(instanceId);

	assertTrue(result.getSuccess());
	assertEquals("Job instance <" + instanceId + "> successfully cancelled.", result.getMessage());
@@ -382,6 +377,73 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
		assertNull(chunk.getData());
	}

	@Test
	void testStoreAndFetchChunksForInstance_NoData() {
		// given
		JobInstance instance = createInstance();
		String instanceId = mySvc.storeNewInstance(instance);

		String queuedId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 0, "some data");
		String erroredId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 1, "some more data");
		String completedId = storeWorkChunk(JOB_DEFINITION_ID, TARGET_STEP_ID, instanceId, 2, "some more data");

		mySvc.onWorkChunkDequeue(erroredId);
		WorkChunkErrorEvent parameters = new WorkChunkErrorEvent(erroredId, "Our error message");
		mySvc.onWorkChunkError(parameters);

		mySvc.onWorkChunkDequeue(completedId);
		mySvc.onWorkChunkCompletion(new WorkChunkCompletionEvent(completedId, 11, 0));

		// when
		Iterator<WorkChunk> workChunks = mySvc.fetchAllWorkChunksIterator(instanceId, false);

		// then
		ArrayList<WorkChunk> chunks = new ArrayList<>();
		Iterators.addAll(chunks, workChunks);
		assertEquals(3, chunks.size());

		{
			WorkChunk workChunk = chunks.get(0);
			assertNull(workChunk.getData(), "we skip the data");
			assertEquals(queuedId, workChunk.getId());
			assertEquals(JOB_DEFINITION_ID, workChunk.getJobDefinitionId());
			assertEquals(JOB_DEF_VER, workChunk.getJobDefinitionVersion());
			assertEquals(instanceId, workChunk.getInstanceId());
			assertEquals(TARGET_STEP_ID, workChunk.getTargetStepId());
			assertEquals(0, workChunk.getSequence());
			assertEquals(WorkChunkStatusEnum.QUEUED, workChunk.getStatus());

			assertNotNull(workChunk.getCreateTime());
			assertNotNull(workChunk.getStartTime());
			assertNotNull(workChunk.getUpdateTime());
			assertNull(workChunk.getEndTime());
			assertNull(workChunk.getErrorMessage());
			assertEquals(0, workChunk.getErrorCount());
			assertEquals(null, workChunk.getRecordsProcessed());
		}

		{
			WorkChunk workChunk1 = chunks.get(1);
			assertEquals(WorkChunkStatusEnum.ERRORED, workChunk1.getStatus());
			assertEquals("Our error message", workChunk1.getErrorMessage());
			assertEquals(1, workChunk1.getErrorCount());
			assertEquals(null, workChunk1.getRecordsProcessed());
			assertNotNull(workChunk1.getEndTime());
		}

		{
			WorkChunk workChunk2 = chunks.get(2);
			assertEquals(WorkChunkStatusEnum.COMPLETED, workChunk2.getStatus());
			assertNotNull(workChunk2.getEndTime());
			assertEquals(11, workChunk2.getRecordsProcessed());
			assertNull(workChunk2.getErrorMessage());
			assertEquals(0, workChunk2.getErrorCount());
		}

	}

	@Test
	public void testStoreAndFetchWorkChunk_WithData() {
		JobInstance instance = createInstance();
@@ -567,41 +629,6 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
		});
	}

	@Test
	public void testUpdateInstance() {
		String instanceId = mySvc.storeNewInstance(createInstance());

		JobInstance instance = mySvc.fetchInstance(instanceId).orElseThrow(IllegalArgumentException::new);
		assertEquals(instanceId, instance.getInstanceId());
		assertFalse(instance.isWorkChunksPurged());

		instance.setStartTime(new Date());
		sleepUntilTimeChanges();
		instance.setEndTime(new Date());
		instance.setCombinedRecordsProcessed(100);
		instance.setCombinedRecordsProcessedPerSecond(22.0);
		instance.setWorkChunksPurged(true);
		instance.setProgress(0.5d);
		instance.setErrorCount(3);
		instance.setEstimatedTimeRemaining("32d");

		mySvc.updateInstance(instance);

		runInTransaction(() -> {
			Batch2JobInstanceEntity entity = myJobInstanceRepository.findById(instanceId).orElseThrow(IllegalArgumentException::new);
			assertEquals(instance.getStartTime().getTime(), entity.getStartTime().getTime());
			assertEquals(instance.getEndTime().getTime(), entity.getEndTime().getTime());
		});

		JobInstance finalInstance = mySvc.fetchInstance(instanceId).orElseThrow(IllegalArgumentException::new);
		assertEquals(instanceId, finalInstance.getInstanceId());
		assertEquals(0.5d, finalInstance.getProgress());
		assertTrue(finalInstance.isWorkChunksPurged());
		assertEquals(3, finalInstance.getErrorCount());
		assertEquals(instance.getReport(), finalInstance.getReport());
		assertEquals(instance.getEstimatedTimeRemaining(), finalInstance.getEstimatedTimeRemaining());
	}

	@Test
	public void markWorkChunksWithStatusAndWipeData_marksMultipleChunksWithStatus_asExpected() {
		JobInstance instance = createInstance();
@@ -632,11 +659,10 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {
	}

	private WorkChunk freshFetchWorkChunk(String chunkId) {
-		return runInTransaction(() -> {
-			return myWorkChunkRepository.findById(chunkId)
-				.map(e-> JobInstanceUtil.fromEntityToWorkChunk(e, true))
-				.orElseThrow(IllegalArgumentException::new);
-		});
+		return runInTransaction(() ->
+			myWorkChunkRepository.findById(chunkId)
+				.map(e-> JobInstanceUtil.fromEntityToWorkChunk(e))
+				.orElseThrow(IllegalArgumentException::new));
	}
@@ -663,15 +689,11 @@ public class JpaJobPersistenceImplTest extends BaseJpaR4Test {

	final String id = mySvc.storeNewInstance(jobInstance);

-	jobInstance.setInstanceId(id);
-	final LocalDateTime localDateTime = LocalDateTime.now()
-		.minusMinutes(minutes);
-	ourLog.info("localDateTime: {}", localDateTime);
-	jobInstance.setEndTime(Date.from(localDateTime
-		.atZone(ZoneId.systemDefault())
-		.toInstant()));
+	mySvc.updateInstance(id, instance->{
+		instance.setEndTime(Date.from(Instant.now().minus(minutes, ChronoUnit.MINUTES)));
+		return true;
+	});

-	mySvc.updateInstance(jobInstance);

	return id;
}
@@ -99,7 +99,7 @@ public class Batch2JobHelper {
		.map(t -> t.getJobDefinitionId() + "/" + t.getStatus().name())
		.collect(Collectors.joining("\n"));
	String currentStatus = myJobCoordinator.getInstance(theBatchJobId).getStatus().name();
-	fail("Job still has status " + currentStatus + " - All statuses:\n" + statuses);
+	fail("Job " + theBatchJobId + " still has status " + currentStatus + " - All statuses:\n" + statuses);
	}
	return myJobCoordinator.getInstance(theBatchJobId);
}
@@ -131,7 +131,9 @@ public class Batch2JobHelper {
	}

	private boolean hasStatus(String theBatchJobId, StatusEnum[] theExpectedStatuses) {
-		return ArrayUtils.contains(theExpectedStatuses, getStatus(theBatchJobId));
+		StatusEnum status = getStatus(theBatchJobId);
+		ourLog.debug("Checking status of {} in {}: is {}", theBatchJobId, theExpectedStatuses, status);
+		return ArrayUtils.contains(theExpectedStatuses, status);
	}

	private StatusEnum getStatus(String theBatchJobId) {
@@ -1,5 +1,3 @@
-package ca.uhn.hapi.fhir.batch2.test;
-
/*-
 * #%L
 * HAPI FHIR JPA Server - Batch2 specification tests

@@ -20,7 +18,14 @@ package ca.uhn.hapi.fhir.batch2.test;
 * #L%
 */

+package ca.uhn.hapi.fhir.batch2.test;
+
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.RunOutcome;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
import ca.uhn.fhir.batch2.maintenance.JobInstanceProcessor;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;

@@ -30,11 +35,16 @@ import ca.uhn.fhir.batch2.model.WorkChunkErrorEvent;
import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
import ca.uhn.fhir.util.StopWatch;
import ca.uhn.hapi.fhir.batch2.test.support.TestJobParameters;
import ca.uhn.hapi.fhir.batch2.test.support.TestJobStep2InputType;
import ca.uhn.hapi.fhir.batch2.test.support.TestJobStep3InputType;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;

@@ -54,19 +64,22 @@ import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.emptyString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Specification tests for batch2 storage and event system.
 * These tests are abstract, and do not depend on JPA.
 * Test setups should use the public batch2 api to create scenarios.
 */
public abstract class AbstractIJobPersistenceSpecificationTest {
	private static final Logger ourLog = LoggerFactory.getLogger(AbstractIJobPersistenceSpecificationTest.class);

	public static final String JOB_DEFINITION_ID = "definition-id";
	public static final String TARGET_STEP_ID = "step-id";
@@ -489,9 +502,56 @@ public abstract class AbstractIJobPersistenceSpecificationTest {
		}
	}

	/**
	 * Test
	 * @see hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
	 */
	@Nested
	class InstanceStateTransitions {

		@Test
		void createInstance_createsInQueuedWithChunk() {
			// given
			JobDefinition<?> jd = withJobDefinition();

			// when
			IJobPersistence.CreateResult createResult =
				newTxTemplate().execute(status->
					mySvc.onCreateWithFirstChunk(jd, "{}"));

			// then
			ourLog.info("job and chunk created {}", createResult);
			assertNotNull(createResult);
			assertThat(createResult.jobInstanceId, not(emptyString()));
			assertThat(createResult.workChunkId, not(emptyString()));

			JobInstance jobInstance = freshFetchJobInstance(createResult.jobInstanceId);
			assertThat(jobInstance.getStatus(), equalTo(StatusEnum.QUEUED));
			assertThat(jobInstance.getParameters(), equalTo("{}"));

			WorkChunk firstChunk = freshFetchWorkChunk(createResult.workChunkId);
			assertThat(firstChunk.getStatus(), equalTo(WorkChunkStatusEnum.QUEUED));
			assertNull(firstChunk.getData(), "First chunk data is null - only uses parameters");
		}

		@Test
		void testCreateInstance_firstChunkDequeued_movesToInProgress() {
			// given
			JobDefinition<?> jd = withJobDefinition();
			IJobPersistence.CreateResult createResult = newTxTemplate().execute(status->
				mySvc.onCreateWithFirstChunk(jd, "{}"));
			assertNotNull(createResult);

			// when
			newTxTemplate().execute(status -> mySvc.onChunkDequeued(createResult.jobInstanceId));

			// then
			JobInstance jobInstance = freshFetchJobInstance(createResult.jobInstanceId);
			assertThat(jobInstance.getStatus(), equalTo(StatusEnum.IN_PROGRESS));
		}

		@ParameterizedTest
		@EnumSource(StatusEnum.class)
		void cancelRequest_cancelsJob_whenNotFinalState(StatusEnum theState) {
@@ -505,22 +565,76 @@ public abstract class AbstractIJobPersistenceSpecificationTest {
			normalInstance.setStatus(theState);
			String instanceId2 = mySvc.storeNewInstance(normalInstance);

			JobDefinitionRegistry jobDefinitionRegistry = new JobDefinitionRegistry();
			jobDefinitionRegistry.addJobDefinitionIfNotRegistered(withJobDefinition());

			// when
			runInTransaction(()-> mySvc.processCancelRequests());
			runInTransaction(()-> new JobInstanceProcessor(mySvc, null, instanceId1, new JobChunkProgressAccumulator(), null, jobDefinitionRegistry)
				.process());

			// then
			JobInstance freshInstance1 = mySvc.fetchInstance(instanceId1).orElseThrow();
			if (theState.isCancellable()) {
				assertEquals(StatusEnum.CANCELLED, freshInstance1.getStatus(), "cancel request processed");
				assertThat(freshInstance1.getErrorMessage(), containsString("Job instance cancelled"));
			} else {
				assertEquals(theState, freshInstance1.getStatus(), "cancel request ignored - state unchanged");
				assertNull(freshInstance1.getErrorMessage(), "no error message");
			}
			JobInstance freshInstance2 = mySvc.fetchInstance(instanceId2).orElseThrow();
			assertEquals(theState, freshInstance2.getStatus(), "cancel request ignored - cancelled not set");
		}
	}

	@Test
	void testInstanceUpdate_modifierApplied() {
		// given
		String instanceId = mySvc.storeNewInstance(createInstance());

		// when
		mySvc.updateInstance(instanceId, instance ->{
			instance.setErrorCount(42);
			return true;
		});

		// then
		JobInstance jobInstance = freshFetchJobInstance(instanceId);
		assertEquals(42, jobInstance.getErrorCount());
	}

	@Test
	void testInstanceUpdate_modifierNotAppliedWhenPredicateReturnsFalse() {
		// given
		JobInstance instance1 = createInstance();
		boolean initialValue = true;
		instance1.setFastTracking(initialValue);
		String instanceId = mySvc.storeNewInstance(instance1);

		// when
		mySvc.updateInstance(instanceId, instance ->{
			instance.setFastTracking(false);
			return false;
		});

		// then
		JobInstance jobInstance = freshFetchJobInstance(instanceId);
		assertEquals(initialValue, jobInstance.isFastTracking());
	}

	private JobDefinition<TestJobParameters> withJobDefinition() {
		return JobDefinition.newBuilder()
			.setJobDefinitionId(JOB_DEFINITION_ID)
			.setJobDefinitionVersion(JOB_DEF_VER)
			.setJobDescription("A job description")
			.setParametersType(TestJobParameters.class)
			.addFirstStep(TARGET_STEP_ID, "the first step", TestJobStep2InputType.class, (theStepExecutionDetails, theDataSink) -> new RunOutcome(0))
			.addIntermediateStep("2nd-step-id", "the second step", TestJobStep3InputType.class, (theStepExecutionDetails, theDataSink) -> new RunOutcome(0))
			.addLastStep("last-step-id", "the final step", (theStepExecutionDetails, theDataSink) -> new RunOutcome(0))
			.build();
	}

	@Nonnull
	private JobInstance createInstance() {
@@ -540,7 +654,10 @@ public abstract class AbstractIJobPersistenceSpecificationTest {

	protected abstract PlatformTransactionManager getTxManager();
-	protected abstract WorkChunk freshFetchWorkChunk(String chunkId);
+	protected abstract WorkChunk freshFetchWorkChunk(String theChunkId);
	protected JobInstance freshFetchJobInstance(String theInstanceId) {
		return runInTransaction(() -> mySvc.fetchInstance(theInstanceId).orElseThrow());
	}

	public TransactionTemplate newTxTemplate() {
		TransactionTemplate retVal = new TransactionTemplate(getTxManager());
@@ -0,0 +1,52 @@
package ca.uhn.hapi.fhir.batch2.test.support;

import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.model.api.annotation.PasswordField;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.hibernate.validator.constraints.Length;

import javax.validation.constraints.NotBlank;

public class TestJobParameters implements IModelJson {

	@JsonProperty("param1")
	@NotBlank
	private String myParam1;

	@JsonProperty("param2")
	@NotBlank
	@Length(min = 5, max = 100)
	private String myParam2;

	@JsonProperty(value = "password")
	@PasswordField
	private String myPassword;

	public String getPassword() {
		return myPassword;
	}

	public TestJobParameters setPassword(String thePassword) {
		myPassword = thePassword;
		return this;
	}

	public String getParam1() {
		return myParam1;
	}

	public TestJobParameters setParam1(String theParam1) {
		myParam1 = theParam1;
		return this;
	}

	public String getParam2() {
		return myParam2;
	}

	public TestJobParameters setParam2(String theParam2) {
		myParam2 = theParam2;
		return this;
	}

}
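Since TestJobParameters is an IModelJson carrying Jackson annotations, it round-trips through the same serializer the batch framework uses. A small sketch - the output shape is assumed from the @JsonProperty names above, and JsonUtil.serialize is the helper already used elsewhere in this change set:

	TestJobParameters params = new TestJobParameters()
		.setParam1("value1")
		.setParam2("value-longer-than-five");
	// Expected to produce JSON keyed by the @JsonProperty names,
	// e.g. {"param1":"value1","param2":"value-longer-than-five"}
	String json = JsonUtil.serialize(params, false);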
@@ -0,0 +1,43 @@
package ca.uhn.hapi.fhir.batch2.test.support;

import ca.uhn.fhir.model.api.IModelJson;
import com.fasterxml.jackson.annotation.JsonProperty;

public class TestJobStep2InputType implements IModelJson {

	/**
	 * Constructor
	 */
	public TestJobStep2InputType() {
	}

	/**
	 * Constructor
	 */
	public TestJobStep2InputType(String theData1, String theData2) {
		myData1 = theData1;
		myData2 = theData2;
	}

	@JsonProperty("data1")
	private String myData1;
	@JsonProperty("data2")
	private String myData2;

	public String getData1() {
		return myData1;
	}

	public void setData1(String theData1) {
		myData1 = theData1;
	}

	public String getData2() {
		return myData2;
	}

	public void setData2(String theData2) {
		myData2 = theData2;
	}

}
@@ -0,0 +1,31 @@
package ca.uhn.hapi.fhir.batch2.test.support;

import ca.uhn.fhir.model.api.IModelJson;
import com.fasterxml.jackson.annotation.JsonProperty;

public class TestJobStep3InputType implements IModelJson {

	@JsonProperty("data3")
	private String myData3;
	@JsonProperty("data4")
	private String myData4;

	public String getData3() {
		return myData3;
	}

	public TestJobStep3InputType setData3(String theData1) {
		myData3 = theData1;
		return this;
	}

	public String getData4() {
		return myData4;
	}

	public TestJobStep3InputType setData4(String theData2) {
		myData4 = theData2;
		return this;
	}

}
@@ -20,14 +20,23 @@
package ca.uhn.fhir.batch2.api;

import ca.uhn.fhir.batch2.model.FetchJobInstancesRequest;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import ca.uhn.fhir.batch2.models.JobInstanceFetchRequest;
import ca.uhn.fhir.i18n.Msg;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

import javax.annotation.Nonnull;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.List;

@@ -38,8 +47,12 @@ import java.util.stream.Stream;
/**
 * Some of this is tested in {@link ca.uhn.hapi.fhir.batch2.test.AbstractIJobPersistenceSpecificationTest}.
 * This is a transactional interface, but we have pushed the declaration of calls that have
 * {@code @Transactional(propagation = Propagation.REQUIRES_NEW)} down to the implementations, since we have a synchronized
 * wrapper that was double-creating the NEW transaction.
 */
public interface IJobPersistence extends IWorkChunkPersistence {
	Logger ourLog = LoggerFactory.getLogger(IJobPersistence.class);

	/**

@@ -47,6 +60,7 @@ public interface IJobPersistence extends IWorkChunkPersistence {
	 *
	 * @param theInstance The details
	 */
	@Transactional(propagation = Propagation.REQUIRED)
	String storeNewInstance(JobInstance theInstance);

	/**

@@ -97,6 +111,7 @@ public interface IJobPersistence extends IWorkChunkPersistence {
	/**
	 * Fetches all chunks for a given instance, without loading the data
	 *
	 * TODO MB this seems to only be used by tests. Can we use the iterator instead?
	 * @param theInstanceId The instance ID
	 * @param thePageSize The page size
	 * @param thePageIndex The page index
@@ -114,19 +129,39 @@ public interface IJobPersistence extends IWorkChunkPersistence {
	Iterator<WorkChunk> fetchAllWorkChunksIterator(String theInstanceId, boolean theWithData);

	/**
-	 * Fetch all chunks with data for a given instance for a given step id
+	 * Fetch all chunks with data for a given instance for a given step id - read-only.
	 *
	 * @return - a stream for fetching work chunks
	 */
	@Transactional(propagation = Propagation.MANDATORY, readOnly = true)
	Stream<WorkChunk> fetchAllWorkChunksForStepStream(String theInstanceId, String theStepId);

-	/**
-	 * Update the stored instance. If the status is changing, use {@link ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater}
-	 * instead to ensure state-change callbacks are invoked properly.
-	 *
-	 * @param theInstance The instance - Must contain an ID
-	 * @return true if the status changed
-	 */
-	boolean updateInstance(JobInstance theInstance);

+	/**
+	 * Callback to update a JobInstance within a locked transaction.
+	 * Return true from the callback if the record write should continue, or false if
+	 * the change should be discarded.
+	 */
+	@FunctionalInterface
+	interface JobInstanceUpdateCallback {
+		/**
+		 * Modify theInstance within a write-lock transaction.
+		 * @param theInstance a copy of the instance to modify.
+		 * @return true if the change to theInstance should be written back to the db.
+		 */
+		boolean doUpdate(JobInstance theInstance);
+	}

+	/**
+	 * Goofy hack for now to create a tx boundary.
+	 * If the status is changing, use {@link ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater}
+	 * instead to ensure state-change callbacks are invoked properly.
+	 *
+	 * @param theInstanceId the id of the instance to modify
+	 * @param theModifier a hook to modify the instance - return true to finish the record write
+	 * @return true if the instance was modified
+	 */
+	// todo mb consider changing callers to actual objects we can unit test.
+	boolean updateInstance(String theInstanceId, JobInstanceUpdateCallback theModifier);
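	// A minimal usage sketch of the callback form above (hypothetical caller code, not part of
	// this commit). The modifier runs inside the write-locked transaction; returning false
	// discards the change:
	//
	//   myJobPersistence.updateInstance(instanceId, instance -> {
	//       if (instance.getStatus() != StatusEnum.IN_PROGRESS) {
	//           return false; // discard - another thread moved the state
	//       }
	//       instance.setProgress(0.5);
	//       return true; // write the modified record back
	//   });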
	/**
	 * Deletes the instance and all associated work chunks
@@ -152,6 +187,9 @@ public interface IJobPersistence extends IWorkChunkPersistence {

	boolean markInstanceAsStatus(String theInstance, StatusEnum theStatusEnum);

	@Transactional(propagation = Propagation.MANDATORY)
	boolean markInstanceAsStatusWhenStatusIn(String theInstance, StatusEnum theStatusEnum, Set<StatusEnum> thePriorStates);

	/**
	 * Marks an instance as cancelled
	 *
@@ -161,6 +199,58 @@ public interface IJobPersistence extends IWorkChunkPersistence {

	void updateInstanceUpdateTime(String theInstanceId);

	void processCancelRequests();

	/*
	 * State transition events for job instances.
	 * These cause the transitions along {@link ca.uhn.fhir.batch2.model.StatusEnum}
	 *
	 * @see hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
	 */
	///////
	// job events

	class CreateResult {
		public final String jobInstanceId;
		public final String workChunkId;

		public CreateResult(String theJobInstanceId, String theWorkChunkId) {
			jobInstanceId = theJobInstanceId;
			workChunkId = theWorkChunkId;
		}

		@Override
		public String toString() {
			return new ToStringBuilder(this)
				.append("jobInstanceId", jobInstanceId)
				.append("workChunkId", workChunkId)
				.toString();
		}
	}

	@Nonnull
	default CreateResult onCreateWithFirstChunk(JobDefinition<?> theJobDefinition, String theParameters) {
		JobInstance instance = JobInstance.fromJobDefinition(theJobDefinition);
		instance.setParameters(theParameters);
		instance.setStatus(StatusEnum.QUEUED);

		String instanceId = storeNewInstance(instance);
		ourLog.info("Stored new {} job {} with status {}", theJobDefinition.getJobDefinitionId(), instanceId, instance.getStatus());
		ourLog.debug("Job parameters: {}", instance.getParameters());

		WorkChunkCreateEvent batchWorkChunk = WorkChunkCreateEvent.firstChunk(theJobDefinition, instanceId);
		String chunkId = onWorkChunkCreate(batchWorkChunk);
		return new CreateResult(instanceId, chunkId);

	}

	/**
	 * Move from QUEUED->IN_PROGRESS when a work chunk arrives.
	 * Ignore other prior states.
	 * @return did the transition happen
	 */
	default boolean onChunkDequeued(String theJobInstanceId) {
		return markInstanceAsStatusWhenStatusIn(theJobInstanceId, StatusEnum.IN_PROGRESS, Collections.singleton(StatusEnum.QUEUED));
	}

}
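Taken together, the instance-level onChunkDequeued and the chunk-level onWorkChunkDequeue are designed to run under one transaction boundary when a work notification arrives. A hedged sketch of a receiver - hypothetical code, since the real wiring lives in WorkChannelMessageHandler, whose body is not shown in this diff:

	Optional<WorkChunk> chunk = myTransactionService.withSystemRequest().execute(() -> {
		// QUEUED -> IN_PROGRESS on the instance; ignored for other prior states
		myJobPersistence.onChunkDequeued(notification.getInstanceId());
		// fetch the chunk and mark it IN_PROGRESS; empty if not in a runnable state
		return myJobPersistence.onWorkChunkDequeue(notification.getChunkId());
	});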
@@ -19,12 +19,13 @@
 */
package ca.uhn.fhir.batch2.api;

-import ca.uhn.fhir.batch2.coordinator.BatchWorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkCompletionEvent;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import ca.uhn.fhir.batch2.model.WorkChunkErrorEvent;
import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

import java.util.Iterator;
import java.util.List;
@@ -53,35 +54,19 @@ public interface IWorkChunkPersistence {
	 * @param theBatchWorkChunk the batch work chunk to be stored
	 * @return a globally unique identifier for this chunk.
	 */
-	default String onWorkChunkCreate(WorkChunkCreateEvent theBatchWorkChunk) {
-		// back-compat for one minor version
-		return storeWorkChunk(theBatchWorkChunk);
-	}
-
-	// wipmb for deletion
-	@Deprecated(since="6.5.6")
-	default String storeWorkChunk(BatchWorkChunk theBatchWorkChunk) {
-		// dead code in 6.5.7
-		return null;
-	}
+	@Transactional(propagation = Propagation.REQUIRED)
+	String onWorkChunkCreate(WorkChunkCreateEvent theBatchWorkChunk);

	/**
	 * On arrival at a worker.
	 * The second state event, as the worker starts processing.
	 * Transition to {@link WorkChunkStatusEnum#IN_PROGRESS} unless the chunk is not in QUEUED or ERRORED state.
	 *
-	 * @param theChunkId The ID from {@link #onWorkChunkCreate(BatchWorkChunk theBatchWorkChunk)}
+	 * @param theChunkId The ID from {@link #onWorkChunkCreate}
	 * @return The WorkChunk or empty if no chunk exists, or not in a runnable state (QUEUED or ERRORED)
	 */
-	default Optional<WorkChunk> onWorkChunkDequeue(String theChunkId) {
-		// back-compat for one minor version
-		return fetchWorkChunkSetStartTimeAndMarkInProgress(theChunkId);
-	}
-
-	// wipmb for deletion
-	@Deprecated(since="6.5.6")
-	default Optional<WorkChunk> fetchWorkChunkSetStartTimeAndMarkInProgress(String theChunkId) {
-		// dead code
-		return null;
-	}
+	@Transactional(propagation = Propagation.REQUIRED)
+	Optional<WorkChunk> onWorkChunkDequeue(String theChunkId);

	/**
	 * A retryable error.
@@ -91,17 +76,7 @@ public interface IWorkChunkPersistence {
	 * @param theParameters - the error message and max retry count.
	 * @return - the new status - ERRORED or FAILED, depending on retry count
	 */
-	default WorkChunkStatusEnum onWorkChunkError(WorkChunkErrorEvent theParameters) {
-		// back-compat for one minor version
-		return workChunkErrorEvent(theParameters);
-	}
-
-	// wipmb for deletion
-	@Deprecated(since="6.5.6")
-	default WorkChunkStatusEnum workChunkErrorEvent(WorkChunkErrorEvent theParameters) {
-		// dead code in 6.5.7
-		return null;
-	}
+	WorkChunkStatusEnum onWorkChunkError(WorkChunkErrorEvent theParameters);

	/**
	 * An unrecoverable error.
@@ -109,17 +84,8 @@ public interface IWorkChunkPersistence {
	 *
	 * @param theChunkId The chunk ID
	 */
-	default void onWorkChunkFailed(String theChunkId, String theErrorMessage) {
-		// back-compat for one minor version
-		markWorkChunkAsFailed(theChunkId, theErrorMessage);
-	}
-
-	// wipmb for deletion
-	@Deprecated(since="6.5.6")
-	default void markWorkChunkAsFailed(String theChunkId, String theErrorMessage) {
-		// dead code in 6.5.7
-	}
+	@Transactional(propagation = Propagation.REQUIRED)
+	void onWorkChunkFailed(String theChunkId, String theErrorMessage);

	/**
@@ -128,15 +94,8 @@ public interface IWorkChunkPersistence {
	 *
	 * @param theEvent with record and error count
	 */
-	default void onWorkChunkCompletion(WorkChunkCompletionEvent theEvent) {
-		// back-compat for one minor version
-		workChunkCompletionEvent(theEvent);
-	}
-
-	// wipmb for deletion
-	@Deprecated(since="6.5.6")
-	default void workChunkCompletionEvent(WorkChunkCompletionEvent theEvent) {
-		// dead code in 6.5.7
-	}
+	@Transactional(propagation = Propagation.REQUIRED)
+	void onWorkChunkCompletion(WorkChunkCompletionEvent theEvent);

	/**
	 * Marks all work chunks with the provided status and erases the data
@@ -146,6 +105,7 @@ public interface IWorkChunkPersistence {
	 * @param theStatus - the status to mark
	 * @param theErrorMsg - error message (if status warrants it)
	 */
	@Transactional(propagation = Propagation.MANDATORY)
	void markWorkChunksWithStatusAndWipeData(String theInstanceId, List<String> theChunkIds, WorkChunkStatusEnum theStatus, String theErrorMsg);
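For orientation, the happy-path ordering of these chunk events, as exercised by the persistence tests earlier in this change (the variable names here are illustrative, not from the diff):

	String chunkId = persistence.onWorkChunkCreate(createEvent);         // chunk starts QUEUED
	Optional<WorkChunk> chunk = persistence.onWorkChunkDequeue(chunkId); // QUEUED -> IN_PROGRESS
	// on success:
	persistence.onWorkChunkCompletion(new WorkChunkCompletionEvent(chunkId, recordsProcessed, errorCount));
	// or, on a retryable error:
	persistence.onWorkChunkError(new WorkChunkErrorEvent(chunkId, "error message"));
	// or, on an unrecoverable failure:
	persistence.onWorkChunkFailed(chunkId, "fatal error message");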
@@ -71,14 +71,16 @@ public abstract class BaseBatch2Config {
	public IJobCoordinator batch2JobCoordinator(JobDefinitionRegistry theJobDefinitionRegistry,
			BatchJobSender theBatchJobSender,
			WorkChunkProcessor theExecutor,
-			IJobMaintenanceService theJobMaintenanceService) {
+			IJobMaintenanceService theJobMaintenanceService,
+			IHapiTransactionService theTransactionService) {
		return new JobCoordinatorImpl(
			theBatchJobSender,
			batch2ProcessingChannelReceiver(myChannelFactory),
			myPersistence,
			theJobDefinitionRegistry,
			theExecutor,
-			theJobMaintenanceService);
+			theJobMaintenanceService,
+			theTransactionService);
	}

	@Bean
@@ -1,90 +0,0 @@
/*-
 * #%L
 * HAPI FHIR JPA Server - Batch2 Task Processor
 * %%
 * Copyright (C) 2014 - 2023 Smile CDR, Inc.
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package ca.uhn.fhir.batch2.coordinator;

import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

/**
 * wipmb delete, and push down to WorkChunkCreateEvent
 */
public class BatchWorkChunk {

	public final String jobDefinitionId;
	public final int jobDefinitionVersion;
	public final String targetStepId;
	public final String instanceId;
	public final int sequence;
	public final String serializedData;

	/**
	 * Constructor
	 *
	 * @param theJobDefinitionId The job definition ID
	 * @param theJobDefinitionVersion The job definition version
	 * @param theTargetStepId The step ID that will be responsible for consuming this chunk
	 * @param theInstanceId The instance ID associated with this chunk
	 * @param theSerializedData The data. This will be in the form of a map where the values may be strings, lists, and other maps (i.e. JSON)
	 */
	public BatchWorkChunk(@Nonnull String theJobDefinitionId, int theJobDefinitionVersion, @Nonnull String theTargetStepId, @Nonnull String theInstanceId, int theSequence, @Nullable String theSerializedData) {
		jobDefinitionId = theJobDefinitionId;
		jobDefinitionVersion = theJobDefinitionVersion;
		targetStepId = theTargetStepId;
		instanceId = theInstanceId;
		sequence = theSequence;
		serializedData = theSerializedData;
	}

	public static WorkChunkCreateEvent firstChunk(JobDefinition<?> theJobDefinition, String theInstanceId) {
		String firstStepId = theJobDefinition.getFirstStepId();
		String jobDefinitionId = theJobDefinition.getJobDefinitionId();
		int jobDefinitionVersion = theJobDefinition.getJobDefinitionVersion();
		return new WorkChunkCreateEvent(jobDefinitionId, jobDefinitionVersion, firstStepId, theInstanceId, 0, null);
	}

	@Override
	public boolean equals(Object theO) {
		if (this == theO) return true;

		if (theO == null || getClass() != theO.getClass()) return false;

		BatchWorkChunk that = (BatchWorkChunk) theO;

		return new EqualsBuilder()
			.append(jobDefinitionVersion, that.jobDefinitionVersion)
			.append(sequence, that.sequence)
			.append(jobDefinitionId, that.jobDefinitionId)
			.append(targetStepId, that.targetStepId)
			.append(instanceId, that.instanceId)
			.append(serializedData, that.serializedData)
			.isEquals();
	}

	@Override
	public int hashCode() {
		return new HashCodeBuilder(17, 37).append(jobDefinitionId).append(jobDefinitionVersion).append(targetStepId).append(instanceId).append(sequence).append(serializedData).toHashCode();
	}
}
@@ -30,10 +30,10 @@ import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobInstanceStartRequest;
import ca.uhn.fhir.batch2.model.JobWorkNotification;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import ca.uhn.fhir.batch2.models.JobInstanceFetchRequest;
import ca.uhn.fhir.i18n.Msg;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
import ca.uhn.fhir.jpa.subscription.channel.api.IChannelReceiver;
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;

@@ -48,7 +48,6 @@ import javax.annotation.Nullable;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

@@ -65,6 +64,7 @@ public class JobCoordinatorImpl implements IJobCoordinator {
	private final MessageHandler myReceiverHandler;
	private final JobQuerySvc myJobQuerySvc;
	private final JobParameterJsonValidator myJobParameterJsonValidator;
	private final IHapiTransactionService myTransactionService;

	/**
	 * Constructor
@@ -74,7 +74,8 @@ public class JobCoordinatorImpl implements IJobCoordinator {
			@Nonnull IJobPersistence theJobPersistence,
			@Nonnull JobDefinitionRegistry theJobDefinitionRegistry,
			@Nonnull WorkChunkProcessor theExecutorSvc,
-			@Nonnull IJobMaintenanceService theJobMaintenanceService) {
+			@Nonnull IJobMaintenanceService theJobMaintenanceService,
+			@Nonnull IHapiTransactionService theTransactionService) {
		Validate.notNull(theJobPersistence);

		myJobPersistence = theJobPersistence;

@@ -82,16 +83,14 @@ public class JobCoordinatorImpl implements IJobCoordinator {
		myWorkChannelReceiver = theWorkChannelReceiver;
		myJobDefinitionRegistry = theJobDefinitionRegistry;

-		myReceiverHandler = new WorkChannelMessageHandler(theJobPersistence, theJobDefinitionRegistry, theBatchJobSender, theExecutorSvc, theJobMaintenanceService);
+		myReceiverHandler = new WorkChannelMessageHandler(theJobPersistence, theJobDefinitionRegistry, theBatchJobSender, theExecutorSvc, theJobMaintenanceService, theTransactionService);
		myJobQuerySvc = new JobQuerySvc(theJobPersistence, theJobDefinitionRegistry);
		myJobParameterJsonValidator = new JobParameterJsonValidator();
		myTransactionService = theTransactionService;
	}

	@Override
	public Batch2JobStartResponse startInstance(JobInstanceStartRequest theStartRequest) {
-		JobDefinition<?> jobDefinition = myJobDefinitionRegistry
-			.getLatestJobDefinition(theStartRequest.getJobDefinitionId()).orElseThrow(() -> new IllegalArgumentException(Msg.code(2063) + "Unknown job definition ID: " + theStartRequest.getJobDefinitionId()));

		String paramsString = theStartRequest.getParameters();
		if (isBlank(paramsString)) {
			throw new InvalidRequestException(Msg.code(2065) + "No parameters supplied");
@@ -103,9 +102,9 @@ public class JobCoordinatorImpl implements IJobCoordinator {
		List<JobInstance> existing = myJobPersistence.fetchInstances(request, 0, 1000);
		if (!existing.isEmpty()) {
			// we'll look for completed ones first... otherwise, take any of the others
-			Collections.sort(existing, (o1, o2) -> -(o1.getStatus().ordinal() - o2.getStatus().ordinal()));
+			existing.sort((o1, o2) -> -(o1.getStatus().ordinal() - o2.getStatus().ordinal()));

-			JobInstance first = existing.stream().findFirst().get();
+			JobInstance first = existing.stream().findFirst().orElseThrow();

			Batch2JobStartResponse response = new Batch2JobStartResponse();
			response.setInstanceId(first.getInstanceId());
@@ -117,24 +116,21 @@ public class JobCoordinatorImpl implements IJobCoordinator {
		}
	}

+	JobDefinition<?> jobDefinition = myJobDefinitionRegistry
+		.getLatestJobDefinition(theStartRequest.getJobDefinitionId()).orElseThrow(() -> new IllegalArgumentException(Msg.code(2063) + "Unknown job definition ID: " + theStartRequest.getJobDefinitionId()));

	myJobParameterJsonValidator.validateJobParameters(theStartRequest, jobDefinition);

-	JobInstance instance = JobInstance.fromJobDefinition(jobDefinition);
-	instance.setParameters(theStartRequest.getParameters());
-	instance.setStatus(StatusEnum.QUEUED);
-
-	String instanceId = myJobPersistence.storeNewInstance(instance);
-	ourLog.info("Stored new {} job {} with status {}", jobDefinition.getJobDefinitionId(), instanceId, instance.getStatus());
-	ourLog.debug("Job parameters: {}", instance.getParameters());
+	IJobPersistence.CreateResult instanceAndFirstChunk =
+		myTransactionService.withSystemRequest().execute(() ->
+			myJobPersistence.onCreateWithFirstChunk(jobDefinition, theStartRequest.getParameters()));

-	WorkChunkCreateEvent batchWorkChunk = WorkChunkCreateEvent.firstChunk(jobDefinition, instanceId);
-	String chunkId = myJobPersistence.onWorkChunkCreate(batchWorkChunk);
-
-	JobWorkNotification workNotification = JobWorkNotification.firstStepNotification(jobDefinition, instanceId, chunkId);
+	JobWorkNotification workNotification = JobWorkNotification.firstStepNotification(jobDefinition, instanceAndFirstChunk.jobInstanceId, instanceAndFirstChunk.workChunkId);
	myBatchJobSender.sendWorkChannelMessage(workNotification);

	Batch2JobStartResponse response = new Batch2JobStartResponse();
-	response.setInstanceId(instanceId);
+	response.setInstanceId(instanceAndFirstChunk.jobInstanceId);
	return response;
	}
|
|||
String instanceId = getInstanceId();
|
||||
String targetStepId = myTargetStep.getStepId();
|
||||
|
||||
// wipmb what is sequence for? It isn't global, so what?
|
||||
int sequence = myChunkCounter.getAndIncrement();
|
||||
OT dataValue = theData.getData();
|
||||
String dataValueString = JsonUtil.serialize(dataValue, false);
|
||||
|
|
|
@@ -25,8 +25,8 @@ import ca.uhn.fhir.batch2.model.JobDefinitionStep;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.context.ConfigurationException;
import ca.uhn.fhir.i18n.Msg;
-import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.util.Logs;
import com.google.common.collect.ImmutableSortedMap;
import org.apache.commons.lang3.Validate;
import org.slf4j.Logger;
@@ -46,6 +46,7 @@ import java.util.stream.Collectors;
public class JobDefinitionRegistry {
	private static final Logger ourLog = Logs.getBatchTroubleshootingLog();

	// TODO MB is this safe? Can we use ConcurrentHashMap instead?
	private volatile Map<String, NavigableMap<Integer, JobDefinition<?>>> myJobs = new HashMap<>();

	/**
@@ -164,13 +165,9 @@ public class JobDefinitionRegistry {
		return myJobs.isEmpty();
	}

-	public Optional<JobDefinition<?>> getJobDefinition(JobInstance theJobInstance) {
-		return getJobDefinition(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
-	}
-
	@SuppressWarnings("unchecked")
-	public <PT extends IModelJson> JobDefinition<PT> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
-		return (JobDefinition<PT>) getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
+	public <T extends IModelJson> JobDefinition<T> getJobDefinitionOrThrowException(JobInstance theJobInstance) {
+		return (JobDefinition<T>) getJobDefinitionOrThrowException(theJobInstance.getJobDefinitionId(), theJobInstance.getJobDefinitionVersion());
	}

	public Collection<Integer> getJobDefinitionVersions(String theDefinitionId) {
|
@@ -21,10 +21,10 @@ package ca.uhn.fhir.batch2.coordinator;

 import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
 import ca.uhn.fhir.batch2.api.IJobPersistence;
-import ca.uhn.fhir.batch2.channel.BatchJobSender;
 import ca.uhn.fhir.batch2.model.JobDefinition;
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.JobWorkCursor;
+import ca.uhn.fhir.batch2.model.StatusEnum;
 import ca.uhn.fhir.batch2.model.WorkChunk;
 import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
 import ca.uhn.fhir.model.api.IModelJson;
@@ -38,7 +38,6 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
 	private static final Logger ourLog = Logs.getBatchTroubleshootingLog();

 	private final IJobPersistence myJobPersistence;
-	private final BatchJobSender myBatchJobSender;
 	private final WorkChunkProcessor myJobExecutorSvc;
 	private final IJobMaintenanceService myJobMaintenanceService;
 	private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
@@ -50,7 +49,6 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
 	private final JobWorkCursor<PT, IT, OT> myCursor;

 	JobStepExecutor(@Nonnull IJobPersistence theJobPersistence,
-					@Nonnull BatchJobSender theBatchJobSender,
 					@Nonnull JobInstance theInstance,
 					WorkChunk theWorkChunk,
 					@Nonnull JobWorkCursor<PT, IT, OT> theCursor,
@@ -58,7 +56,6 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
 					@Nonnull IJobMaintenanceService theJobMaintenanceService,
 					@Nonnull JobDefinitionRegistry theJobDefinitionRegistry) {
 		myJobPersistence = theJobPersistence;
-		myBatchJobSender = theBatchJobSender;
 		myDefinition = theCursor.jobDefinition;
 		myInstance = theInstance;
 		myInstanceId = theInstance.getInstanceId();
@@ -66,10 +63,9 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
 		myCursor = theCursor;
 		myJobExecutorSvc = theExecutor;
 		myJobMaintenanceService = theJobMaintenanceService;
-		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(myJobPersistence, theJobDefinitionRegistry);
+		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobDefinitionRegistry);
 	}

 	@SuppressWarnings("unchecked")
 	public void executeStep() {
 		JobStepExecutorOutput<PT, IT, OT> stepExecutorOutput = myJobExecutorSvc.doExecution(
 			myCursor,
@@ -83,8 +79,11 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex

 		if (stepExecutorOutput.getDataSink().firstStepProducedNothing()) {
 			ourLog.info("First step of job instance {} produced no work chunks, marking as completed and setting end date", myInstanceId);
-			myInstance.setEndTime(new Date());
-			myJobInstanceStatusUpdater.setCompleted(myInstance);
+			myJobPersistence.updateInstance(myInstance.getInstanceId(), instance -> {
+				instance.setEndTime(new Date());
+				myJobInstanceStatusUpdater.updateInstanceStatus(instance, StatusEnum.COMPLETED);
+				return true;
+			});
 		}

 		if (myInstance.isFastTracking()) {
@@ -97,13 +96,17 @@ public class JobStepExecutor<PT extends IModelJson, IT extends IModelJson, OT ex
 				ourLog.debug("Gated job {} step {} produced exactly one chunk: Triggering a maintenance pass.", myDefinition.getJobDefinitionId(), myCursor.currentStep.getStepId());
 				boolean success = myJobMaintenanceService.triggerMaintenancePass();
 				if (!success) {
-					myInstance.setFastTracking(false);
-					myJobPersistence.updateInstance(myInstance);
+					myJobPersistence.updateInstance(myInstance.getInstanceId(), instance -> {
+						instance.setFastTracking(false);
+						return true;
+					});
 				}
 			} else {
 				ourLog.debug("Gated job {} step {} produced {} chunks: Disabling fast tracking.", myDefinition.getJobDefinitionId(), myCursor.currentStep.getStepId(), theDataSink.getWorkChunkCount());
-				myInstance.setFastTracking(false);
-				myJobPersistence.updateInstance(myInstance);
+				myJobPersistence.updateInstance(myInstance.getInstanceId(), instance -> {
+					instance.setFastTracking(false);
+					return true;
+				});
 			}
 		}
 	}
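The recurring `updateInstance(instanceId, instance -> { ...; return true; })` calls above are the heart of this change: every instance mutation becomes one transactional load-modify-save, and returning false skips the write. A rough sketch of that contract - `txTemplate` and `repository` are assumed stand-ins, and the real callback type is HAPI's, not `Predicate`:

    // Illustrative contract only - not the HAPI implementation.
    boolean updateInstance(String theInstanceId, Predicate<JobInstance> theModifier) {
        return txTemplate.execute(tx -> {
            JobInstance instance = repository.findById(theInstanceId).orElse(null); // loaded inside the tx
            if (instance == null) {
                return false;
            }
            boolean changed = theModifier.test(instance); // mutate in place; true means "persist me"
            if (changed) {
                repository.save(instance);
            }
            return changed;
        });
    }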
@@ -49,6 +49,6 @@ public class JobStepExecutorFactory {
 	}

 	public <PT extends IModelJson, IT extends IModelJson, OT extends IModelJson> JobStepExecutor<PT,IT,OT> newJobStepExecutor(@Nonnull JobInstance theInstance, WorkChunk theWorkChunk, @Nonnull JobWorkCursor<PT, IT, OT> theCursor) {
-		return new JobStepExecutor<>(myJobPersistence, myBatchJobSender, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
+		return new JobStepExecutor<>(myJobPersistence, theInstance, theWorkChunk, theCursor, myJobStepExecutorSvc, myJobMaintenanceService, myJobDefinitionRegistry);
 	}
 }
@@ -22,19 +22,19 @@ package ca.uhn.fhir.batch2.coordinator;
 import ca.uhn.fhir.batch2.api.IJobPersistence;
 import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
+import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.JobWorkCursor;
 import ca.uhn.fhir.batch2.model.StatusEnum;
 import ca.uhn.fhir.batch2.model.WorkChunkData;
+import ca.uhn.fhir.batch2.progress.InstanceProgress;
 import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
 import ca.uhn.fhir.i18n.Msg;
 import ca.uhn.fhir.model.api.IModelJson;
 import ca.uhn.fhir.util.JsonUtil;
 import ca.uhn.fhir.util.Logs;
+import org.apache.commons.lang3.Validate;
 import org.slf4j.Logger;

 import java.util.Date;
 import java.util.Optional;

 public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson, OT extends IModelJson>
 	extends BaseDataSink<PT, IT, OT> {
@@ -55,9 +55,16 @@ public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson,
 	@Override
 	public void accept(WorkChunkData<OT> theData) {
 		String instanceId = getInstanceId();
-		Optional<JobInstance> instanceOp = myJobPersistence.fetchInstance(instanceId);
-		if (instanceOp.isPresent()) {
-			JobInstance instance = instanceOp.get();
+		OT data = theData.getData();
+		String dataString = JsonUtil.serialize(data, false);
+		JobChunkProgressAccumulator progressAccumulator = new JobChunkProgressAccumulator();
+		JobInstanceProgressCalculator myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(myJobPersistence, progressAccumulator, myJobDefinitionRegistry);

+		InstanceProgress progress = myJobInstanceProgressCalculator.calculateInstanceProgress(instanceId);
+		boolean changed = myJobPersistence.updateInstance(instanceId, instance -> {
+			Validate.validState(
+				StatusEnum.FINALIZE.equals(instance.getStatus()),
+				"Job %s must be in FINALIZE state. In %s", instanceId, instance.getStatus());

 			if (instance.getReport() != null) {
 				// last in wins - so we won't throw
@@ -75,16 +82,12 @@ public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson,
 			 *
 			 * I could envision a better setup where the stuff that the maintenance service touches
 			 * is moved into separate DB tables or transactions away from the stuff that the
-			 * reducer touches.. If the two could never collide we wouldn't need this duplication
+			 * reducer touches. If the two could never collide we wouldn't need this duplication
 			 * here. Until then though, this is safer.
 			 */

-			JobChunkProgressAccumulator progressAccumulator = new JobChunkProgressAccumulator();
-			JobInstanceProgressCalculator myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(myJobPersistence, progressAccumulator, myJobDefinitionRegistry);
-			myJobInstanceProgressCalculator.calculateInstanceProgressAndPopulateInstance(instance);
+			progress.updateInstance(instance);

-			OT data = theData.getData();
-			String dataString = JsonUtil.serialize(data, false);
 			instance.setReport(dataString);
 			instance.setStatus(StatusEnum.COMPLETED);
 			instance.setEndTime(new Date());
@@ -94,15 +97,13 @@ public class ReductionStepDataSink<PT extends IModelJson, IT extends IModelJson,
 				.addArgument(() -> JsonUtil.serialize(instance))
 				.log("New instance state: {}");

-			myJobPersistence.updateInstance(instance);
+			return true;
+		});

-			ourLog.info("Finalized job instance {} with report length {} chars", instance.getInstanceId(), dataString.length());
-
-		} else {
-			String msg = "No instance found with Id " + instanceId;
-			ourLog.error(msg);
-
-			throw new JobExecutionFailedException(Msg.code(2097) + msg);
+		if (!changed) {
+			ourLog.error("No instance found with Id {} in FINALIZE state", instanceId);
+			throw new JobExecutionFailedException(Msg.code(2097) + ("No instance found with Id " + instanceId));
 		}
 	}
@@ -120,7 +120,7 @@ public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorS
 	public void triggerReductionStep(String theInstanceId, JobWorkCursor<?, ?, ?> theJobWorkCursor) {
 		myInstanceIdToJobWorkCursor.putIfAbsent(theInstanceId, theJobWorkCursor);
 		if (myCurrentlyExecuting.availablePermits() > 0) {
-			myReducerExecutor.submit(() -> reducerPass());
+			myReducerExecutor.submit(this::reducerPass);
 		}
 	}
@@ -161,6 +161,7 @@ public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorS
 		switch (instance.getStatus()) {
 			case IN_PROGRESS:
 			case ERRORED:
+				// this will take a write lock on the JobInstance, preventing duplicates.
 				if (myJobPersistence.markInstanceAsStatus(instance.getInstanceId(), StatusEnum.FINALIZE)) {
 					ourLog.info("Job instance {} has been set to FINALIZE state - Beginning reducer step", instance.getInstanceId());
 					shouldProceed = true;
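The "write lock" comment above describes a guarded status flip: only the caller whose UPDATE matches the current status wins, so a second maintenance pass cannot start a duplicate reducer. A hypothetical SQL rendering of `markInstanceAsStatus` - the table and column names are assumptions:

    // Sketch: a compare-and-set in SQL form; exactly one caller sees rows == 1.
    int rows = jdbcTemplate.update(
        "UPDATE BT2_JOB_INSTANCE SET STAT = 'FINALIZE' WHERE ID = ? AND STAT IN ('IN_PROGRESS', 'ERRORED')",
        instanceId);
    boolean wonTheRace = rows == 1;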
@@ -180,7 +181,7 @@ public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorS
 					+ " This could be a long running reduction job resulting in the processed msg not being acknowledged,"
 					+ " or the result of a failed process or server restarting.",
 				instance.getInstanceId(),
-				instance.getStatus().name()
+				instance.getStatus()
 			);
 			return new ReductionStepChunkProcessingResponse(false);
 		}
@@ -196,9 +197,8 @@ public class ReductionStepExecutorServiceImpl implements IReductionStepExecutorS
 		try {
 			executeInTransactionWithSynchronization(() -> {
 				try (Stream<WorkChunk> chunkIterator = myJobPersistence.fetchAllWorkChunksForStepStream(instance.getInstanceId(), step.getStepId())) {
-					chunkIterator.forEach((chunk) -> {
-						processChunk(chunk, instance, parameters, reductionStepWorker, response, theJobWorkCursor);
-					});
+					chunkIterator.forEach(chunk ->
+						processChunk(chunk, instance, parameters, reductionStepWorker, response, theJobWorkCursor));
 				}
 				return null;
 			});
@@ -19,7 +19,6 @@
  */
 package ca.uhn.fhir.batch2.coordinator;

-
 import ca.uhn.fhir.batch2.api.IJobMaintenanceService;
 import ca.uhn.fhir.batch2.api.IJobPersistence;
 import ca.uhn.fhir.batch2.channel.BatchJobSender;
@@ -28,12 +27,9 @@ import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.JobWorkCursor;
 import ca.uhn.fhir.batch2.model.JobWorkNotification;
 import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
-import ca.uhn.fhir.batch2.model.StatusEnum;
 import ca.uhn.fhir.batch2.model.WorkChunk;
-import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
-import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
+import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
 import ca.uhn.fhir.util.Logs;
 import org.apache.commons.lang3.Validate;
 import org.slf4j.Logger;
 import org.springframework.messaging.Message;
 import org.springframework.messaging.MessageHandler;
@@ -41,6 +37,7 @@ import org.springframework.messaging.MessagingException;

 import javax.annotation.Nonnull;
 import java.util.Optional;
+import java.util.function.Supplier;

 /**
  * This handler receives batch work request messages and performs the batch work requested by the message
@@ -50,17 +47,18 @@ class WorkChannelMessageHandler implements MessageHandler {
 	private final IJobPersistence myJobPersistence;
 	private final JobDefinitionRegistry myJobDefinitionRegistry;
 	private final JobStepExecutorFactory myJobStepExecutorFactory;
-	private final JobInstanceStatusUpdater myJobInstanceStatusUpdater;
+	private final IHapiTransactionService myHapiTransactionService;

 	WorkChannelMessageHandler(@Nonnull IJobPersistence theJobPersistence,
 							  @Nonnull JobDefinitionRegistry theJobDefinitionRegistry,
 							  @Nonnull BatchJobSender theBatchJobSender,
 							  @Nonnull WorkChunkProcessor theExecutorSvc,
-							  @Nonnull IJobMaintenanceService theJobMaintenanceService) {
+							  @Nonnull IJobMaintenanceService theJobMaintenanceService,
+							  IHapiTransactionService theHapiTransactionService) {
 		myJobPersistence = theJobPersistence;
 		myJobDefinitionRegistry = theJobDefinitionRegistry;
+		myHapiTransactionService = theHapiTransactionService;
 		myJobStepExecutorFactory = new JobStepExecutorFactory(theJobPersistence, theBatchJobSender, theExecutorSvc, theJobMaintenanceService, theJobDefinitionRegistry);
-		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
 	}

 	@Override
@@ -68,55 +66,185 @@ class WorkChannelMessageHandler implements MessageHandler {
 		handleWorkChannelMessage((JobWorkNotificationJsonMessage) theMessage);
 	}

+	/**
+	 * Workflow scratchpad for processing a single chunk message.
+	 */
+	class MessageProcess {
+		final JobWorkNotification myWorkNotification;
+		String myChunkId;
+		WorkChunk myWorkChunk;
+		JobWorkCursor<?, ?, ?> myCursor;
+		JobInstance myJobInstance;
+		JobDefinition<?> myJobDefinition;
+		JobStepExecutor<?, ?, ?> myStepExecutor;
+
+		MessageProcess(JobWorkNotification theWorkNotification) {
+			myWorkNotification = theWorkNotification;
+		}
+
+		/**
+		 * Save the chunkId and validate.
+		 */
+		Optional<MessageProcess> validateChunkId() {
+			myChunkId = myWorkNotification.getChunkId();
+			if (myChunkId == null) {
+				ourLog.error("Received work notification with null chunkId: {}", myWorkNotification);
+				return Optional.empty();
+			}
+			return Optional.of(this);
+		}
+
+		Optional<MessageProcess> loadJobDefinitionOrThrow() {
+			String jobDefinitionId = myWorkNotification.getJobDefinitionId();
+			int jobDefinitionVersion = myWorkNotification.getJobDefinitionVersion();
+
+			// Do not catch this exception - that will discard this chunk.
+			// Failing to load a job definition probably means this is an old process during upgrade.
+			// Retry those until this node is killed/restarted.
+			myJobDefinition = myJobDefinitionRegistry.getJobDefinitionOrThrowException(jobDefinitionId, jobDefinitionVersion);
+			return Optional.of(this);
+		}
+
+		/**
+		 * Fetch the job instance including the job definition.
+		 */
+		Optional<MessageProcess> loadJobInstance() {
+			return myJobPersistence.fetchInstance(myWorkNotification.getInstanceId())
+				.or(() -> {
+					ourLog.error("No instance {} exists for chunk notification {}", myWorkNotification.getInstanceId(), myWorkNotification);
+					return Optional.empty();
+				})
+				.map(instance -> {
+					myJobInstance = instance;
+					instance.setJobDefinition(myJobDefinition);
+					return this;
+				});
+		}
+
+		/**
+		 * Load the chunk, and mark it as dequeued.
+		 */
+		Optional<MessageProcess> updateChunkStatusAndValidate() {
+			return myJobPersistence.onWorkChunkDequeue(myChunkId)
+				.or(() -> {
+					ourLog.error("Unable to find chunk with ID {} - Aborting. {}", myChunkId, myWorkNotification);
+					return Optional.empty();
+				})
+				.map(chunk -> {
+					myWorkChunk = chunk;
+					ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", myChunkId, myWorkChunk.getTargetStepId(), myWorkChunk.getStartTime());
+					return this;
+				});
+		}
+
+		/**
+		 * Move QUEUED jobs to IN_PROGRESS, and make sure we are not already in final state.
+		 */
+		Optional<MessageProcess> updateAndValidateJobStatus() {
+			ourLog.trace("Check status {} of job {} for chunk {}", myJobInstance.getStatus(), myJobInstance.getInstanceId(), myChunkId);
+			switch (myJobInstance.getStatus()) {
+				case QUEUED:
+					// Update the job as started.
+					myJobPersistence.onChunkDequeued(myJobInstance.getInstanceId());
+					break;
+
+				case IN_PROGRESS:
+				case ERRORED:
+				case FINALIZE:
+					// normal processing
+					break;
+
+				case COMPLETED:
+					// this is an error, but we can't do much about it.
+					ourLog.error("Received chunk {}, but job instance is {}. Skipping.", myChunkId, myJobInstance.getStatus());
+					return Optional.empty();
+
+				case CANCELLED:
+				case FAILED:
+				default:
+					// should we mark the chunk complete/failed for any of these skipped?
+					ourLog.info("Skipping chunk {} because job instance is {}", myChunkId, myJobInstance.getStatus());
+					return Optional.empty();
+			}
+
+			return Optional.of(this);
+		}
+
+		Optional<MessageProcess> buildCursor() {
+			myCursor = JobWorkCursor.fromJobDefinitionAndRequestedStepId(myJobDefinition, myWorkNotification.getTargetStepId());
+
+			if (!myWorkChunk.getTargetStepId().equals(myCursor.getCurrentStepId())) {
+				ourLog.error("Chunk {} has target step {} but expected {}", myChunkId, myWorkChunk.getTargetStepId(), myCursor.getCurrentStepId());
+				return Optional.empty();
+			}
+			return Optional.of(this);
+		}
+
+		public Optional<MessageProcess> buildStepExecutor() {
+			this.myStepExecutor = myJobStepExecutorFactory.newJobStepExecutor(this.myJobInstance, this.myWorkChunk, this.myCursor);
+			return Optional.of(this);
+		}
+	}
 	private void handleWorkChannelMessage(JobWorkNotificationJsonMessage theMessage) {
 		JobWorkNotification workNotification = theMessage.getPayload();
 		ourLog.info("Received work notification for {}", workNotification);

-		String chunkId = workNotification.getChunkId();
-		Validate.notNull(chunkId);
+		// There are three paths through this code:
+		// 1. Normal execution. We validate, load, update statuses, all in a tx. Then we process the chunk.
+		// 2. Discard chunk. If some validation fails (e.g. no chunk with that id), we log and discard the chunk.
+		//    Probably a db rollback, with a stale queue.
+		// 3. Fail and retry. If we throw an exception out of here, Spring will put the queue message back, and redeliver later.
+		//
+		// We use Optional chaining here to simplify all the cases where we short-circuit exit.
+		// A step that returns an empty Optional means discard the chunk.
+		//
+		executeInTxRollbackWhenEmpty(() -> (
+			// Use a chain of Optional flatMap to handle all the setup short-circuit exits cleanly.
+			Optional.of(new MessageProcess(workNotification))
+				// validate and load info
+				.flatMap(MessageProcess::validateChunkId)
+				// no job definition should be retried - we must be a stale process encountering a new job definition.
+				.flatMap(MessageProcess::loadJobDefinitionOrThrow)
+				.flatMap(MessageProcess::loadJobInstance)
+				// update statuses now in the db: QUEUED->IN_PROGRESS
+				.flatMap(MessageProcess::updateChunkStatusAndValidate)
+				.flatMap(MessageProcess::updateAndValidateJobStatus)
+				// ready to execute
+				.flatMap(MessageProcess::buildCursor)
+				.flatMap(MessageProcess::buildStepExecutor)
+		))
+			.ifPresentOrElse(
+				// all the setup is happy and committed. Do the work.
+				process -> process.myStepExecutor.executeStep(),
+				// discard the chunk
+				() -> ourLog.debug("Discarding chunk notification {}", workNotification)
+			);
-		JobWorkCursor<?, ?, ?> cursor = null;
-		WorkChunk workChunk = null;
-		Optional<WorkChunk> chunkOpt = myJobPersistence.onWorkChunkDequeue(chunkId);
-		if (chunkOpt.isEmpty()) {
-			ourLog.error("Unable to find chunk with ID {} - Aborting", chunkId);
-			return;
-		}
-		workChunk = chunkOpt.get();
-		ourLog.debug("Worker picked up chunk. [chunkId={}, stepId={}, startTime={}]", chunkId, workChunk.getTargetStepId(), workChunk.getStartTime());
-
-		cursor = buildCursorFromNotification(workNotification);
-
-		Validate.isTrue(workChunk.getTargetStepId().equals(cursor.getCurrentStepId()), "Chunk %s has target step %s but expected %s", chunkId, workChunk.getTargetStepId(), cursor.getCurrentStepId());
-
-		Optional<JobInstance> instanceOpt = myJobPersistence.fetchInstance(workNotification.getInstanceId());
-		JobInstance instance = instanceOpt.orElseThrow(() -> new InternalErrorException("Unknown instance: " + workNotification.getInstanceId()));
-		markInProgressIfQueued(instance);
-		myJobDefinitionRegistry.setJobDefinition(instance);
-		String instanceId = instance.getInstanceId();
-
-		if (instance.isCancelled()) {
-			ourLog.info("Skipping chunk {} because job instance is cancelled", chunkId);
-			myJobPersistence.markInstanceAsStatus(instanceId, StatusEnum.CANCELLED);
-			return;
-		}
-
-		JobStepExecutor<?,?,?> stepExecutor = myJobStepExecutorFactory.newJobStepExecutor(instance, workChunk, cursor);
-		stepExecutor.executeStep();
 	}

-	private void markInProgressIfQueued(JobInstance theInstance) {
-		if (theInstance.getStatus() == StatusEnum.QUEUED) {
-			myJobInstanceStatusUpdater.updateInstanceStatus(theInstance, StatusEnum.IN_PROGRESS);
-		}
-	}
+	/**
+	 * Run theCallback in TX, rolling back if the supplied Optional is empty.
+	 */
+	<T> Optional<T> executeInTxRollbackWhenEmpty(Supplier<Optional<T>> theCallback) {
+		return myHapiTransactionService.withSystemRequest()
+			.execute(theTransactionStatus -> {
+
+				// run the processing
+				Optional<T> setupProcessing = theCallback.get();
+
+				if (setupProcessing.isEmpty()) {
+					// If any setup failed, roll back the chunk and instance status changes.
+					ourLog.debug("WorkChunk setup tx rollback");
+					theTransactionStatus.setRollbackOnly();
+				}
+				// else COMMIT the work.
+
+				return setupProcessing;
+			});
+	}
+
-	private JobWorkCursor<?, ?, ?> buildCursorFromNotification(JobWorkNotification workNotification) {
-		String jobDefinitionId = workNotification.getJobDefinitionId();
-		int jobDefinitionVersion = workNotification.getJobDefinitionVersion();
-
-		JobDefinition<?> definition = myJobDefinitionRegistry.getJobDefinitionOrThrowException(jobDefinitionId, jobDefinitionVersion);
-
-		return JobWorkCursor.fromJobDefinitionAndRequestedStepId(definition, workNotification.getTargetStepId());
-	}
 }
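Taken together, the Optional chain and `executeInTxRollbackWhenEmpty` give the three paths the comment promises: a full chain commits the status updates and executes the step, an empty link rolls the whole setup back and discards the message, and a thrown exception leaves the message on the queue for redelivery. A hypothetical demonstration of the helper's semantics, assuming a handler instance in scope:

    Optional<String> kept = handler.executeInTxRollbackWhenEmpty(() -> Optional.of("chunk-1")); // commits
    Optional<String> dropped = handler.executeInTxRollbackWhenEmpty(Optional::empty);           // marks rollback-only
    assert kept.isPresent() && dropped.isEmpty();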
@@ -29,12 +29,10 @@ import ca.uhn.fhir.batch2.model.JobDefinitionStep;
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.JobWorkCursor;
 import ca.uhn.fhir.batch2.model.WorkChunk;
-import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;
 import ca.uhn.fhir.model.api.IModelJson;
 import ca.uhn.fhir.util.Logs;
 import org.apache.commons.lang3.Validate;
 import org.slf4j.Logger;
-import org.springframework.transaction.PlatformTransactionManager;

 import javax.annotation.Nullable;
 import java.util.Optional;
@@ -39,8 +39,8 @@ import static java.util.Collections.emptyList;
 import static org.apache.commons.lang3.ObjectUtils.defaultIfNull;

 /**
- * While performing cleanup, the cleanup job loads all of the known
- * work chunks to examine their status. This bean collects the counts that
+ * While performing cleanup, the cleanup job loads all work chunks
+ * to examine their status. This bean collects the counts that
  * are found, so that they can be reused for maintenance jobs without
  * needing to hit the database a second time.
  */
@@ -50,10 +50,6 @@ public class JobChunkProgressAccumulator {
 	private final Set<String> myConsumedInstanceAndChunkIds = new HashSet<>();
 	private final Multimap<String, ChunkStatusCountValue> myInstanceIdToChunkStatuses = ArrayListMultimap.create();

-	int countChunksWithStatus(String theInstanceId, String theStepId, WorkChunkStatusEnum... theStatuses) {
-		return getChunkIdsWithStatus(theInstanceId, theStepId, theStatuses).size();
-	}
-
 	int getTotalChunkCountForInstanceAndStep(String theInstanceId, String theStepId) {
 		return myInstanceIdToChunkStatuses.get(theInstanceId).stream().filter(chunkCount -> chunkCount.myStepId.equals(theStepId)).collect(Collectors.toList()).size();
 	}
@@ -79,7 +75,7 @@ public class JobChunkProgressAccumulator {
 		// Note: If chunks are being written while we're executing, we may see the same chunk twice. This
 		// check avoids adding it twice.
 		if (myConsumedInstanceAndChunkIds.add(instanceId + " " + chunkId)) {
-			ourLog.debug("Adding chunk to accumulator. [chunkId={}, instanceId={}, status={}]", chunkId, instanceId, theChunk.getStatus());
+			ourLog.debug("Adding chunk to accumulator. [chunkId={}, instanceId={}, status={}, step={}]", chunkId, instanceId, theChunk.getStatus(), theChunk.getTargetStepId());
 			myInstanceIdToChunkStatuses.put(instanceId, new ChunkStatusCountValue(chunkId, theChunk.getTargetStepId(), theChunk.getStatus()));
 		}
 	}
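The dedupe in this accumulator leans entirely on `Set.add()` returning false for a repeat key; in isolation:

    Set<String> consumed = new HashSet<>();
    assert consumed.add("instance-1 chunk-9");  // first sighting: counted
    assert !consumed.add("instance-1 chunk-9"); // concurrent re-read: ignored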
@@ -27,11 +27,14 @@ import ca.uhn.fhir.batch2.model.JobDefinition;
 import ca.uhn.fhir.batch2.model.JobInstance;
 import ca.uhn.fhir.batch2.model.JobWorkCursor;
 import ca.uhn.fhir.batch2.model.JobWorkNotification;
+import ca.uhn.fhir.batch2.model.StatusEnum;
+import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
+import ca.uhn.fhir.batch2.progress.InstanceProgress;
 import ca.uhn.fhir.batch2.progress.JobInstanceProgressCalculator;
 import ca.uhn.fhir.batch2.progress.JobInstanceStatusUpdater;
 import ca.uhn.fhir.model.api.IModelJson;
 import ca.uhn.fhir.util.Logs;
 import ca.uhn.fhir.util.StopWatch;
 import org.apache.commons.lang3.time.DateUtils;
 import org.slf4j.Logger;
@@ -50,7 +53,7 @@ public class JobInstanceProcessor {
 	private final String myInstanceId;
 	private final JobDefinitionRegistry myJobDefinitionegistry;

-	JobInstanceProcessor(IJobPersistence theJobPersistence,
+	public JobInstanceProcessor(IJobPersistence theJobPersistence,
						 BatchJobSender theBatchJobSender,
						 String theInstanceId,
						 JobChunkProgressAccumulator theProgressAccumulator,
@@ -63,26 +66,42 @@ public class JobInstanceProcessor {
 		myReductionStepExecutorService = theReductionStepExecutorService;
 		myJobDefinitionegistry = theJobDefinitionRegistry;
 		myJobInstanceProgressCalculator = new JobInstanceProgressCalculator(theJobPersistence, theProgressAccumulator, theJobDefinitionRegistry);
-		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
+		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobDefinitionRegistry);
 	}

 	public void process() {
 		ourLog.debug("Starting job processing: {}", myInstanceId);
 		StopWatch stopWatch = new StopWatch();

 		JobInstance theInstance = myJobPersistence.fetchInstance(myInstanceId).orElse(null);
 		if (theInstance == null) {
 			return;
 		}

-		handleCancellation(theInstance);
+		boolean cancelUpdate = handleCancellation(theInstance);
+		if (cancelUpdate) {
+			// reload after update
+			theInstance = myJobPersistence.fetchInstance(myInstanceId).orElseThrow();
+		}
 		cleanupInstance(theInstance);
 		triggerGatedExecutions(theInstance);

 		ourLog.debug("Finished job processing: {} - {}", myInstanceId, stopWatch);
 	}

 	// wipmb should we delete this? Or reduce it to an instance event?
-	private void handleCancellation(JobInstance theInstance) {
+	private boolean handleCancellation(JobInstance theInstance) {
 		if (theInstance.isPendingCancellationRequest()) {
-			theInstance.setErrorMessage(buildCancelledMessage(theInstance));
-			myJobInstanceStatusUpdater.setCancelled(theInstance);
+			String errorMessage = buildCancelledMessage(theInstance);
+			ourLog.info("Job {} moving to CANCELLED", theInstance.getInstanceId());
+			return myJobPersistence.updateInstance(theInstance.getInstanceId(), instance -> {
+				boolean changed = myJobInstanceStatusUpdater.updateInstanceStatus(instance, StatusEnum.CANCELLED);
+				if (changed) {
+					instance.setErrorMessage(errorMessage);
+				}
+				return changed;
+			});
 		}
+		return false;
 	}

 	private String buildCancelledMessage(JobInstance theInstance) {
@@ -99,12 +118,12 @@ public class JobInstanceProcessor {
 				// If we're still QUEUED, there are no stats to calculate
 				break;
 			case FINALIZE:
-				// If we're in FINALIZE, the reduction step is working so we should stay out of the way until it
+				// If we're in FINALIZE, the reduction step is working, so we should stay out of the way until it
 				// marks the job as COMPLETED
 				return;
 			case IN_PROGRESS:
 			case ERRORED:
-				myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress(theInstance);
+				myJobInstanceProgressCalculator.calculateAndStoreInstanceProgress(theInstance.getInstanceId());
 				break;
 			case COMPLETED:
 			case FAILED:
@@ -118,11 +137,15 @@ public class JobInstanceProcessor {
 		}

 		if (theInstance.isFinished() && !theInstance.isWorkChunksPurged()) {
-			myJobInstanceProgressCalculator.calculateInstanceProgressAndPopulateInstance(theInstance);
-
-			theInstance.setWorkChunksPurged(true);
 			myJobPersistence.deleteChunksAndMarkInstanceAsChunksPurged(theInstance.getInstanceId());
-			myJobPersistence.updateInstance(theInstance);

+			InstanceProgress progress = myJobInstanceProgressCalculator.calculateInstanceProgress(theInstance.getInstanceId());
+			myJobPersistence.updateInstance(theInstance.getInstanceId(), instance -> {
+				progress.updateInstance(instance);
+				instance.setWorkChunksPurged(true);
+				return true;
+			});
 		}
 	}
@@ -185,14 +208,32 @@ public class JobInstanceProcessor {
 		if (totalChunksForNextStep != queuedChunksForNextStep.size()) {
 			ourLog.debug("Total ProgressAccumulator QUEUED chunk count does not match QUEUED chunk size! [instanceId={}, stepId={}, totalChunks={}, queuedChunks={}]", instanceId, nextStepId, totalChunksForNextStep, queuedChunksForNextStep.size());
 		}
+		// Note on sequence: we don't have XA transactions, and are talking to two stores (JPA + Queue)
+		// Sequence: 1 - So we run the query to minimize the work overlapping.
 		List<String> chunksToSubmit = myJobPersistence.fetchAllChunkIdsForStepWithStatus(instanceId, nextStepId, WorkChunkStatusEnum.QUEUED);
+		// Sequence: 2 - update the job step so the workers will process them.
+		boolean changed = myJobPersistence.updateInstance(instanceId, instance -> {
+			if (instance.getCurrentGatedStepId().equals(nextStepId)) {
+				// someone else beat us here. No changes
+				return false;
+			}
+			instance.setCurrentGatedStepId(nextStepId);
+			return true;
+		});
+		if (!changed) {
+			// we collided with another maintenance job.
+			return;
+		}
+
+		// DESIGN GAP: if we die here, these chunks will never be queued.
+		// Need a WAITING stage before QUEUED for chunks, so we can catch them.
+
+		// Sequence: 3 - send the notifications
 		for (String nextChunkId : chunksToSubmit) {
 			JobWorkNotification workNotification = new JobWorkNotification(theInstance, nextStepId, nextChunkId);
 			myBatchJobSender.sendWorkChannelMessage(workNotification);
 		}
 		ourLog.debug("Submitted a batch of chunks for processing. [chunkCount={}, instanceId={}, stepId={}]", chunksToSubmit.size(), instanceId, nextStepId);
-		theInstance.setCurrentGatedStepId(nextStepId);
-		myJobPersistence.updateInstance(theInstance);
 	}
 }
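The DESIGN GAP comment suggests a WAITING chunk state so a crash between the DB commit (sequence 2) and the queue sends (sequence 3) becomes recoverable. A sketch of what that recovery pass might look like - entirely hypothetical, since `WorkChunkStatusEnum` has no WAITING value and `markChunkAsQueued` does not exist:

    // Hypothetical recovery loop for the proposed WAITING state.
    List<String> waiting = persistence.fetchAllChunkIdsForStepWithStatus(instanceId, nextStepId, WorkChunkStatusEnum.WAITING);
    for (String chunkId : waiting) {
        persistence.markChunkAsQueued(chunkId); // flip first: a crash now costs a duplicate send, never a lost chunk
        batchJobSender.sendWorkChannelMessage(new JobWorkNotification(theInstance, nextStepId, chunkId));
    }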
@@ -200,6 +200,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
 			return;
 		}
 		try {
+			ourLog.info("Maintenance pass starting.");
 			doMaintenancePass();
 		} catch (Exception e) {
 			ourLog.error("Maintenance pass failed", e);
@@ -221,7 +222,7 @@ public class JobMaintenanceServiceImpl implements IJobMaintenanceService, IHasSc
 			myJobDefinitionRegistry.setJobDefinition(instance);
 			JobInstanceProcessor jobInstanceProcessor = new JobInstanceProcessor(myJobPersistence,
 				myBatchJobSender, instanceId, progressAccumulator, myReductionStepExecutorService, myJobDefinitionRegistry);
-			ourLog.debug("Triggering maintenance process for instance {} in status {}", instanceId, instance.getStatus().name());
+			ourLog.debug("Triggering maintenance process for instance {} in status {}", instanceId, instance.getStatus());
 			jobInstanceProcessor.process();
 		}
 	}
@@ -23,6 +23,7 @@ import ca.uhn.fhir.batch2.api.IJobInstance;
 import ca.uhn.fhir.jpa.util.JsonDateDeserializer;
 import ca.uhn.fhir.jpa.util.JsonDateSerializer;
 import ca.uhn.fhir.model.api.IModelJson;
+import ca.uhn.fhir.util.JsonUtil;
 import ca.uhn.fhir.util.Logs;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
@@ -34,7 +35,13 @@ import java.util.Date;

 import static org.apache.commons.lang3.StringUtils.isBlank;

-public class JobInstance extends JobInstanceStartRequest implements IModelJson, IJobInstance {
+public class JobInstance implements IModelJson, IJobInstance {
+
+	@JsonProperty(value = "jobDefinitionId")
+	private String myJobDefinitionId;
+
+	@JsonProperty(value = "parameters")
+	private String myParameters;

 	@JsonProperty(value = "jobDefinitionVersion")
 	private int myJobDefinitionVersion;
@@ -113,7 +120,8 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
 	 * Copy constructor
 	 */
 	public JobInstance(JobInstance theJobInstance) {
-		super(theJobInstance);
+		setJobDefinitionId(theJobInstance.getJobDefinitionId());
+		setParameters(theJobInstance.getParameters());
 		setCancelled(theJobInstance.isCancelled());
 		setFastTracking(theJobInstance.isFastTracking());
 		setCombinedRecordsProcessed(theJobInstance.getCombinedRecordsProcessed());
@@ -135,6 +143,34 @@ public class JobInstance extends JobInstanceStartRequest implements IModelJson,
 		setReport(theJobInstance.getReport());
 	}

+	public String getJobDefinitionId() {
+		return myJobDefinitionId;
+	}
+
+	public void setJobDefinitionId(String theJobDefinitionId) {
+		myJobDefinitionId = theJobDefinitionId;
+	}
+
+	public String getParameters() {
+		return myParameters;
+	}
+
+	public void setParameters(String theParameters) {
+		myParameters = theParameters;
+	}
+
+	public <T extends IModelJson> T getParameters(Class<T> theType) {
+		if (myParameters == null) {
+			return null;
+		}
+		return JsonUtil.deserialize(myParameters, theType);
+	}
+
+	public void setParameters(IModelJson theParameters) {
+		myParameters = JsonUtil.serializeOrInvalidRequest(theParameters);
+	}
+
 	public void setUpdateTime(Date theUpdateTime) {
 		myUpdateTime = theUpdateTime;
 	}
@@ -21,12 +21,14 @@ package ca.uhn.fhir.batch2.model;

 import ca.uhn.fhir.i18n.Msg;
 import ca.uhn.fhir.util.Logs;
+import com.google.common.collect.Maps;
 import org.slf4j.Logger;

 import javax.annotation.Nonnull;
 import java.util.Collections;
 import java.util.EnumMap;
 import java.util.EnumSet;
+import java.util.Map;
 import java.util.Set;
@@ -58,7 +60,7 @@ public enum StatusEnum {
 	 * Task execution resulted in an error but the error may be transient (or transient status is unknown).
 	 * Retrying may result in success.
 	 */
-	ERRORED(true, true, true),
+	ERRORED(true, false, true),

 	/**
 	 * Task has failed and is known to be unrecoverable. There is no reason to believe that retrying will
@@ -74,29 +76,29 @@ public enum StatusEnum {
 	private static final Logger ourLog = Logs.getBatchTroubleshootingLog();

 	/** Map from state to Set of legal inbound states */
-	static final EnumMap<StatusEnum, Set<StatusEnum>> ourFromStates;
+	static final Map<StatusEnum, Set<StatusEnum>> ourFromStates;
 	/** Map from state to Set of legal outbound states */
-	static final EnumMap<StatusEnum, Set<StatusEnum>> ourToStates;
+	static final Map<StatusEnum, Set<StatusEnum>> ourToStates;

 	static {
-		// wipmb make immutable.
-		ourFromStates = new EnumMap<>(StatusEnum.class);
-		ourToStates = new EnumMap<>(StatusEnum.class);
 		Set<StatusEnum> cancelableStates = EnumSet.noneOf(StatusEnum.class);

+		EnumMap<StatusEnum, Set<StatusEnum>> fromStates = new EnumMap<>(StatusEnum.class);
+		EnumMap<StatusEnum, Set<StatusEnum>> toStates = new EnumMap<>(StatusEnum.class);
+
 		for (StatusEnum nextEnum: StatusEnum.values()) {
-			ourFromStates.put(nextEnum, EnumSet.noneOf(StatusEnum.class));
-			ourToStates.put(nextEnum, EnumSet.noneOf(StatusEnum.class));
+			fromStates.put(nextEnum, EnumSet.noneOf(StatusEnum.class));
+			toStates.put(nextEnum, EnumSet.noneOf(StatusEnum.class));
 		}
 		for (StatusEnum nextPriorEnum: StatusEnum.values()) {
 			for (StatusEnum nextNextEnum: StatusEnum.values()) {
 				if (isLegalStateTransition(nextPriorEnum, nextNextEnum)) {
-					ourFromStates.get(nextNextEnum).add(nextPriorEnum);
-					ourToStates.get(nextPriorEnum).add(nextNextEnum);
+					fromStates.get(nextNextEnum).add(nextPriorEnum);
+					toStates.get(nextPriorEnum).add(nextNextEnum);
 				}
 			}
 		}
+
+		ourFromStates = Maps.immutableEnumMap(fromStates);
+		ourToStates = Maps.immutableEnumMap(toStates);
 	}

 	private final boolean myIncomplete;
@@ -159,7 +161,6 @@ public enum StatusEnum {
 		return retVal;
 	}

-	@Nonnull
 	private static void initializeStaticEndedStatuses() {
 		EnumSet<StatusEnum> endedSet = EnumSet.noneOf(StatusEnum.class);
 		EnumSet<StatusEnum> notEndedSet = EnumSet.noneOf(StatusEnum.class);
|
|||
}
|
||||
|
||||
public static boolean isLegalStateTransition(StatusEnum theOrigStatus, StatusEnum theNewStatus) {
|
||||
if (theOrigStatus == theNewStatus) {
|
||||
return true;
|
||||
}
|
||||
Boolean canTransition;
|
||||
boolean canTransition;
|
||||
switch (theOrigStatus) {
|
||||
case QUEUED:
|
||||
// initial state can transition to anything
|
||||
|
@@ -188,30 +186,29 @@ public enum StatusEnum {
 				canTransition = theNewStatus != QUEUED;
 				break;
 			case ERRORED:
-				canTransition = theNewStatus == FAILED || theNewStatus == COMPLETED || theNewStatus == CANCELLED;
+				canTransition = theNewStatus == FAILED || theNewStatus == COMPLETED || theNewStatus == CANCELLED || theNewStatus == ERRORED;
 				break;
-			case COMPLETED:
 			case CANCELLED:
-			case FAILED:
-				// terminal state cannot transition
-				canTransition = false;
-				break;
+			case COMPLETED:
+				canTransition = false;
+				break;
+			case FAILED:
+				canTransition = theNewStatus == FAILED;
+				break;
 			case FINALIZE:
 				canTransition = theNewStatus != QUEUED && theNewStatus != IN_PROGRESS;
 				break;
 			default:
-				canTransition = null;
-				break;
+				throw new IllegalStateException(Msg.code(2131) + "Unknown batch state " + theOrigStatus);
 		}

-		if (canTransition == null){
-			throw new IllegalStateException(Msg.code(2131) + "Unknown batch state " + theOrigStatus);
-		} else {
-			if (!canTransition) {
-				ourLog.trace("Tried to execute an illegal state transition. [origStatus={}, newStatus={}]", theOrigStatus, theNewStatus);
-			}
-			return canTransition;
-		}
+		if (!canTransition) {
+			ourLog.trace("Tried to execute an illegal state transition. [origStatus={}, newStatus={}]", theOrigStatus, theNewStatus);
+		}
+		return canTransition;
 	}

 	public boolean isIncomplete() {
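The rewritten switch changes a few transitions worth restating; these assertions follow directly from the code above (run with -ea; the import assumes the enum is on the classpath):

    import ca.uhn.fhir.batch2.model.StatusEnum;

    public class StatusEnumTransitionDemo {
        public static void main(String[] args) {
            // ERRORED -> ERRORED is now legal (transient retries)
            assert StatusEnum.isLegalStateTransition(StatusEnum.ERRORED, StatusEnum.ERRORED);
            // FAILED only re-enters FAILED; it cannot reach COMPLETED
            assert StatusEnum.isLegalStateTransition(StatusEnum.FAILED, StatusEnum.FAILED);
            assert !StatusEnum.isLegalStateTransition(StatusEnum.FAILED, StatusEnum.COMPLETED);
            // COMPLETED stays terminal
            assert !StatusEnum.isLegalStateTransition(StatusEnum.COMPLETED, StatusEnum.IN_PROGRESS);
        }
    }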
@@ -234,7 +231,7 @@ public enum StatusEnum {
 	}

 	/**
-	 * States this state may transtion to.
+	 * States this state may transition to.
 	 */
 	public Set<StatusEnum> getNextStates() {
 		return ourToStates.get(this);
@@ -45,7 +45,7 @@ public class WorkChunk implements IModelJson {
 	private String myId;

 	@JsonProperty("sequence")
-	// wipmb this seems unused.
+	// TODO MB danger - these repeat with a job or even a single step. They start at 0 for every parent chunk. Review after merge.
 	private int mySequence;

 	@JsonProperty("status")
@@ -19,7 +19,8 @@
  */
 package ca.uhn.fhir.batch2.model;

-import ca.uhn.fhir.batch2.coordinator.BatchWorkChunk;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;

 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
@@ -29,7 +30,14 @@ import javax.annotation.Nullable;
  * Payload for the work-chunk creation event including all the job coordinates, the chunk data, and a sequence within the step.
  * @see hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
  */
-public class WorkChunkCreateEvent extends BatchWorkChunk {
+public class WorkChunkCreateEvent {
+	public final String jobDefinitionId;
+	public final int jobDefinitionVersion;
+	public final String targetStepId;
+	public final String instanceId;
+	public final int sequence;
+	public final String serializedData;

 	/**
 	 * Constructor
 	 *
@@ -37,10 +45,51 @@ public class WorkChunkCreateEvent extends BatchWorkChunk {
 	 * @param theJobDefinitionVersion The job definition version
 	 * @param theTargetStepId The step ID that will be responsible for consuming this chunk
 	 * @param theInstanceId The instance ID associated with this chunk
 	 * @param theSequence
 	 * @param theSerializedData The data. This will be in the form of a map where the values may be strings, lists, and other maps (i.e. JSON)
 	 */
 	public WorkChunkCreateEvent(@Nonnull String theJobDefinitionId, int theJobDefinitionVersion, @Nonnull String theTargetStepId, @Nonnull String theInstanceId, int theSequence, @Nullable String theSerializedData) {
-		super(theJobDefinitionId, theJobDefinitionVersion, theTargetStepId, theInstanceId, theSequence, theSerializedData);
+		jobDefinitionId = theJobDefinitionId;
+		jobDefinitionVersion = theJobDefinitionVersion;
+		targetStepId = theTargetStepId;
+		instanceId = theInstanceId;
+		sequence = theSequence;
+		serializedData = theSerializedData;
 	}

+	public static WorkChunkCreateEvent firstChunk(JobDefinition<?> theJobDefinition, String theInstanceId) {
+		String firstStepId = theJobDefinition.getFirstStepId();
+		String jobDefinitionId = theJobDefinition.getJobDefinitionId();
+		int jobDefinitionVersion = theJobDefinition.getJobDefinitionVersion();
+		return new WorkChunkCreateEvent(jobDefinitionId, jobDefinitionVersion, firstStepId, theInstanceId, 0, null);
+	}
+
+	@Override
+	public boolean equals(Object theO) {
+		if (this == theO) return true;
+		if (theO == null || getClass() != theO.getClass()) return false;
+		WorkChunkCreateEvent that = (WorkChunkCreateEvent) theO;
+		return new EqualsBuilder()
+			.append(jobDefinitionId, that.jobDefinitionId)
+			.append(jobDefinitionVersion, that.jobDefinitionVersion)
+			.append(targetStepId, that.targetStepId)
+			.append(instanceId, that.instanceId)
+			.append(sequence, that.sequence)
+			.append(serializedData, that.serializedData)
+			.isEquals();
+	}
+
+	@Override
+	public int hashCode() {
+		return new HashCodeBuilder(17, 37)
+			.append(jobDefinitionId)
+			.append(jobDefinitionVersion)
+			.append(targetStepId)
+			.append(instanceId)
+			.append(sequence)
+			.append(serializedData)
+			.toHashCode();
+	}
+}
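Usage implied by the new `firstChunk` factory, with `jobDefinition` and `instanceId` assumed in scope:

    WorkChunkCreateEvent firstChunk = WorkChunkCreateEvent.firstChunk(jobDefinition, instanceId);
    // The first step is special: sequence is 0 and serializedData is null,
    // because the first step reads the job parameters rather than chunk data.
    assert firstChunk.sequence == 0 && firstChunk.serializedData == null;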
@@ -23,8 +23,6 @@ package ca.uhn.fhir.batch2.model;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;

-import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor.MAX_CHUNK_ERROR_COUNT;
-
 /**
  * Payload for the work-chunk error event including the error message, and the allowed retry count.
  * @see hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
@@ -32,7 +30,6 @@ import static ca.uhn.fhir.batch2.coordinator.WorkChunkProcessor.MAX_CHUNK_ERROR_
 public class WorkChunkErrorEvent extends BaseWorkChunkEvent {

 	private String myErrorMsg;
-	private int maxRetries = MAX_CHUNK_ERROR_COUNT;

 	public WorkChunkErrorEvent(String theChunkId) {
 		super(theChunkId);
@@ -52,15 +49,6 @@ public class WorkChunkErrorEvent extends BaseWorkChunkEvent {
 		return this;
 	}

-	public int getMaxRetries() {
-		return maxRetries;
-	}
-
-	// wipmb - will we ever want this?
-	public void setMaxRetries(int theMaxRetries) {
-		maxRetries = theMaxRetries;
-	}
-
 	@Override
 	public boolean equals(Object theO) {
 		if (this == theO) return true;
@@ -73,7 +61,6 @@ public class WorkChunkErrorEvent extends BaseWorkChunkEvent {
 			.appendSuper(super.equals(theO))
 			.append(myChunkId, that.myChunkId)
 			.append(myErrorMsg, that.myErrorMsg)
-			.append(maxRetries, that.maxRetries)
 			.isEquals();
 	}
@@ -83,7 +70,6 @@ public class WorkChunkErrorEvent extends BaseWorkChunkEvent {
 			.appendSuper(super.hashCode())
 			.append(myChunkId)
 			.append(myErrorMsg)
-			.append(maxRetries)
 			.toHashCode();
 	}
 }
@@ -28,7 +28,7 @@ import java.util.Set;
  * @see hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/docs/server_jpa_batch/batch2_states.md
  */
 public enum WorkChunkStatusEnum {
-	// TODO: Whis is missing a state - WAITING for gated. it would simplify stats wipmb - not this PR
+	// TODO MB: missing a state - WAITING for gated. it would simplify stats - not in this MR - later
 	QUEUED, IN_PROGRESS, ERRORED, FAILED, COMPLETED;

 	private static final EnumMap<WorkChunkStatusEnum, Set<WorkChunkStatusEnum>> ourPriorStates;
@@ -17,14 +17,23 @@
  * limitations under the License.
  * #L%
  */

 /**
  * Our distributed batch processing library.
  *
- * WIPMB Plan
- * done split status enum
- * done move work chunk methods to IWorkChunkPersistence
- * wipmb convert work chunk methods to events - requires bump
- * wipmb review tx layer - the variety of @Transaction annotations is scary.
+ * A running job corresponds to a {@link ca.uhn.fhir.batch2.model.JobInstance}.
+ * Jobs are modeled as a sequence of steps, operating on {@link ca.uhn.fhir.batch2.model.WorkChunk}s
+ * containing json data. The first step is special -- it is empty, and the data is assumed to be the job parameters.
+ * A {@link ca.uhn.fhir.batch2.model.JobDefinition} defines the sequence of {@link ca.uhn.fhir.batch2.model.JobDefinitionStep}s.
+ * Each step defines the input chunk type, the output chunk type, and a procedure that receives the input and emits 0 or more outputs.
+ * We have a special kind of final step called a reducer, which corresponds to the stream Collector concept.
+ *
+ * Design gaps:
+ * <ul>
+ *    <li> If the maintenance job is killed while sending notifications about
+ *    a gated step advance, remaining chunks will never be notified. A CREATED state before QUEUED would catch this.
+ *    </li>
+ * </ul>
  */
 package ca.uhn.fhir.batch2;
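To make the javadoc's model concrete, here is a sketch of a two-step definition. The builder method names follow my reading of the `JobDefinition` API and should be treated as approximate; `MyParams`, `MyChunk`, and the two worker references are placeholders:

    JobDefinition<MyParams> definition = JobDefinition.newBuilder()
        .setJobDefinitionId("example-job")
        .setJobDefinitionVersion(1)
        .setParametersType(MyParams.class)
        // first step: input is empty, the parameters drive it; output is MyChunk
        .addFirstStep("generate", "Produce work chunks", MyChunk.class, generateWorker)
        // final step: consumes each MyChunk; a reducer would collect them instead
        .addLastStep("consume", "Process each chunk", consumeWorker)
        .build();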
@@ -33,21 +33,23 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;

-class InstanceProgress {
+public class InstanceProgress {
 	private static final Logger ourLog = Logs.getBatchTroubleshootingLog();

 	private int myRecordsProcessed = 0;

+	// these 4 cover all chunks
 	private int myIncompleteChunkCount = 0;
+	private int myQueuedCount = 0;
 	private int myCompleteChunkCount = 0;
 	private int myErroredChunkCount = 0;
 	private int myFailedChunkCount = 0;
+
 	private int myErrorCountForAllStatuses = 0;
-	private Long myEarliestStartTime = null;
-	private Long myLatestEndTime = null;
+	private Date myEarliestStartTime = null;
+	private Date myLatestEndTime = null;
 	private String myErrormessage = null;
 	private StatusEnum myNewStatus = null;
-	private Map<String, Map<WorkChunkStatusEnum, Integer>> myStepToStatusCountMap = new HashMap<>();
+	private final Map<String, Map<WorkChunkStatusEnum, Integer>> myStepToStatusCountMap = new HashMap<>();

 	public void addChunk(WorkChunk theChunk) {
 		myErrorCountForAllStatuses += theChunk.getErrorCount();
@@ -86,18 +88,14 @@ class InstanceProgress {
 	}

 	private void updateLatestEndTime(WorkChunk theChunk) {
-		if (theChunk.getEndTime() != null) {
-			if (myLatestEndTime == null || myLatestEndTime < theChunk.getEndTime().getTime()) {
-				myLatestEndTime = theChunk.getEndTime().getTime();
-			}
+		if (theChunk.getEndTime() != null && (myLatestEndTime == null || myLatestEndTime.before(theChunk.getEndTime()))) {
+			myLatestEndTime = theChunk.getEndTime();
 		}
 	}

 	private void updateEarliestTime(WorkChunk theChunk) {
-		if (theChunk.getStartTime() != null) {
-			if (myEarliestStartTime == null || myEarliestStartTime > theChunk.getStartTime().getTime()) {
-				myEarliestStartTime = theChunk.getStartTime().getTime();
-			}
+		if (theChunk.getStartTime() != null && (myEarliestStartTime == null || myEarliestStartTime.after(theChunk.getStartTime()))) {
+			myEarliestStartTime = theChunk.getStartTime();
 		}
 	}
@ -107,67 +105,68 @@ class InstanceProgress {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the job instance with status information.
|
||||
* We shouldn't read any values from theInstance here -- just write.
|
||||
*
|
||||
* @param theInstance the instance to update with progress statistics
|
||||
*/
|
||||
public void updateInstance(JobInstance theInstance) {
|
||||
if (myEarliestStartTime != null) {
|
||||
theInstance.setStartTime(new Date(myEarliestStartTime));
|
||||
theInstance.setStartTime(myEarliestStartTime);
|
||||
}
|
||||
if (myLatestEndTime != null && hasNewStatus() && myNewStatus.isEnded()) {
|
||||
theInstance.setEndTime(myLatestEndTime);
|
||||
}
|
||||
theInstance.setErrorCount(myErrorCountForAllStatuses);
|
||||
theInstance.setCombinedRecordsProcessed(myRecordsProcessed);
|
||||
|
||||
updateStatus(theInstance);
|
||||
		if (getChunkCount() > 0) {
			double percentComplete = (double) (myCompleteChunkCount) / (double) getChunkCount();
			theInstance.setProgress(percentComplete);
		}

		setEndTime(theInstance);

		if (myEarliestStartTime != null && myLatestEndTime != null) {
			long elapsedTime = myLatestEndTime.getTime() - myEarliestStartTime.getTime();
			if (elapsedTime > 0) {
				double throughput = StopWatch.getThroughput(myRecordsProcessed, elapsedTime, TimeUnit.SECONDS);
				theInstance.setCombinedRecordsProcessedPerSecond(throughput);

				String estimatedTimeRemaining = StopWatch.formatEstimatedTimeRemaining(myCompleteChunkCount, getChunkCount(), elapsedTime);
				theInstance.setEstimatedTimeRemaining(estimatedTimeRemaining);
			}
		}

		theInstance.setErrorMessage(myErrormessage);
	}

	private void setEndTime(JobInstance theInstance) {
		if (myLatestEndTime != null) {
			if (myFailedChunkCount > 0) {
				theInstance.setEndTime(new Date(myLatestEndTime));
			} else if (myCompleteChunkCount > 0 && myIncompleteChunkCount == 0 && myErroredChunkCount == 0) {
				theInstance.setEndTime(new Date(myLatestEndTime));
			}
		if (hasNewStatus()) {
			ourLog.trace("Status will change for {}: {}", theInstance.getInstanceId(), myNewStatus);
		}
	}

	private void updateStatus(JobInstance theInstance) {
		ourLog.trace("Updating status for instance with errors: {}", myErroredChunkCount);
		if (myCompleteChunkCount >= 1 || myErroredChunkCount >= 1) {
			ourLog.trace("Statistics for job {}: complete/in-progress/errored/failed chunk count {}/{}/{}/{}",
				theInstance.getInstanceId(), myCompleteChunkCount, myIncompleteChunkCount, myErroredChunkCount, myFailedChunkCount);
		}

		double percentComplete = (double) (myCompleteChunkCount) / (double) (myIncompleteChunkCount + myCompleteChunkCount + myFailedChunkCount + myErroredChunkCount);
		theInstance.setProgress(percentComplete);
	private int getChunkCount() {
		return myIncompleteChunkCount + myCompleteChunkCount + myFailedChunkCount + myErroredChunkCount;
	}

		if (jobSuccessfullyCompleted()) {
			myNewStatus = StatusEnum.COMPLETED;
		} else if (myErroredChunkCount > 0) {
			myNewStatus = StatusEnum.ERRORED;
		}

		ourLog.trace("Status is now {} with errored chunk count {}", myNewStatus, myErroredChunkCount);
		if (myEarliestStartTime != null && myLatestEndTime != null) {
			long elapsedTime = myLatestEndTime - myEarliestStartTime;
			if (elapsedTime > 0) {
				double throughput = StopWatch.getThroughput(myRecordsProcessed, elapsedTime, TimeUnit.SECONDS);
				theInstance.setCombinedRecordsProcessedPerSecond(throughput);

				String estimatedTimeRemaining = StopWatch.formatEstimatedTimeRemaining(myCompleteChunkCount, (myCompleteChunkCount + myIncompleteChunkCount), elapsedTime);
				theInstance.setEstimatedTimeRemaining(estimatedTimeRemaining);
			}
		}
	/**
	 * Transitions from IN_PROGRESS/ERRORED based on chunk statuses.
	 */
	public void calculateNewStatus() {
		if (myFailedChunkCount > 0) {
			myNewStatus = StatusEnum.FAILED;
		} else if (myErroredChunkCount > 0) {
			myNewStatus = StatusEnum.ERRORED;
		} else if (myIncompleteChunkCount == 0 && myCompleteChunkCount > 0) {
			myNewStatus = StatusEnum.COMPLETED;
		}
	}

	private boolean jobSuccessfullyCompleted() {
		return myIncompleteChunkCount == 0 && myErroredChunkCount == 0 && myFailedChunkCount == 0;
	}

	public boolean failed() {
		return myFailedChunkCount > 0;
	}

	public boolean changed() {
		return (myIncompleteChunkCount + myCompleteChunkCount + myErroredChunkCount) >= 2 || myErrorCountForAllStatuses > 0;
		return (myIncompleteChunkCount + myCompleteChunkCount + myErroredChunkCount + myErrorCountForAllStatuses) > 0;
	}

	@Override
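The arithmetic above reduces to two formulas: progress is completed chunks divided by total chunks, and throughput is records processed divided by elapsed wall-clock time. A minimal, self-contained sketch of the same math, with made-up chunk counts and timestamps (every value below is hypothetical, and the plain-Java division stands in for StopWatch.getThroughput()):

    // Standalone sketch of the progress math above; all inputs are invented.
    public class ProgressMathSketch {
        public static void main(String[] args) {
            int complete = 3, incomplete = 2, failed = 0, errored = 1;
            int total = complete + incomplete + failed + errored;  // getChunkCount()
            double percentComplete = (double) complete / total;    // 0.5

            long earliestStartMillis = 1_000_000L;
            long latestEndMillis = 1_060_000L;                     // 60s of wall time
            int recordsProcessed = 150;
            long elapsedMillis = latestEndMillis - earliestStartMillis;
            // Records per second, as StopWatch.getThroughput() reports it:
            double throughput = recordsProcessed / (elapsedMillis / 1000.0); // 2.5/sec
            System.out.printf("%.0f%% complete, %.1f records/sec%n",
                percentComplete * 100, throughput);
        }
    }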
@@ -22,10 +22,10 @@ package ca.uhn.fhir.batch2.progress;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.maintenance.JobChunkProgressAccumulator;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.util.StopWatch;
import org.slf4j.Logger;

import javax.annotation.Nonnull;

@@ -40,58 +40,53 @@ public class JobInstanceProgressCalculator {
	public JobInstanceProgressCalculator(IJobPersistence theJobPersistence, JobChunkProgressAccumulator theProgressAccumulator, JobDefinitionRegistry theJobDefinitionRegistry) {
		myJobPersistence = theJobPersistence;
		myProgressAccumulator = theProgressAccumulator;
		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobPersistence, theJobDefinitionRegistry);
		myJobInstanceStatusUpdater = new JobInstanceStatusUpdater(theJobDefinitionRegistry);
	}

	public void calculateAndStoreInstanceProgress(JobInstance theInstance) {
		String instanceId = theInstance.getInstanceId();
	public void calculateAndStoreInstanceProgress(String theInstanceId) {
		StopWatch stopWatch = new StopWatch();
		ourLog.trace("calculating progress: {}", theInstanceId);

		InstanceProgress instanceProgress = calculateInstanceProgress(instanceId);
		InstanceProgress instanceProgress = calculateInstanceProgress(theInstanceId);

		if (instanceProgress.failed()) {
			myJobInstanceStatusUpdater.setFailed(theInstance);
		}

		JobInstance currentInstance = myJobPersistence.fetchInstance(instanceId).orElse(null);
		if (currentInstance != null) {
		myJobPersistence.updateInstance(theInstanceId, currentInstance->{
			instanceProgress.updateInstance(currentInstance);

			if (instanceProgress.changed() || currentInstance.getStatus() == StatusEnum.IN_PROGRESS) {
				if (currentInstance.getCombinedRecordsProcessed() > 0) {
					ourLog.info("Job {} of type {} has status {} - {} records processed ({}/sec) - ETA: {}", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed(), currentInstance.getCombinedRecordsProcessedPerSecond(), currentInstance.getEstimatedTimeRemaining());
					ourLog.debug(instanceProgress.toString());
				} else {
					ourLog.info("Job {} of type {} has status {} - {} records processed", currentInstance.getInstanceId(), currentInstance.getJobDefinitionId(), currentInstance.getStatus(), currentInstance.getCombinedRecordsProcessed());
					ourLog.debug(instanceProgress.toString());
				}
				ourLog.debug(instanceProgress.toString());
			}

			if (instanceProgress.changed()) {
				if (instanceProgress.hasNewStatus()) {
					myJobInstanceStatusUpdater.updateInstanceStatus(currentInstance, instanceProgress.getNewStatus());
				} else {
					myJobPersistence.updateInstance(currentInstance);
				}

			if (instanceProgress.hasNewStatus()) {
				myJobInstanceStatusUpdater.updateInstanceStatus(currentInstance, instanceProgress.getNewStatus());
			}

		}
			return true;
		});
		ourLog.trace("calculating progress: {} - complete in {}", theInstanceId, stopWatch);
	}

	@Nonnull
	private InstanceProgress calculateInstanceProgress(String instanceId) {
	public InstanceProgress calculateInstanceProgress(String instanceId) {
		InstanceProgress instanceProgress = new InstanceProgress();
		Iterator<WorkChunk> workChunkIterator = myJobPersistence.fetchAllWorkChunksIterator(instanceId, false);

		while (workChunkIterator.hasNext()) {
			WorkChunk next = workChunkIterator.next();
			// global stats
			myProgressAccumulator.addChunk(next);
			// instance stats
			instanceProgress.addChunk(next);
		}

		instanceProgress.calculateNewStatus();

		return instanceProgress;
	}

	public void calculateInstanceProgressAndPopulateInstance(JobInstance theInstance) {
		InstanceProgress progress = calculateInstanceProgress(theInstance.getInstanceId());
		progress.updateInstance(theInstance);
	}
}
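The calculator now funnels every mutation through IJobPersistence.updateInstance(id, callback), so the fetch, the mutation, and the write all happen inside one transaction boundary. A rough sketch of that pattern follows; the stand-in types below are simplified for illustration (the real interfaces live in ca.uhn.fhir.batch2.api and carry more methods), and the in-memory "persistence" is not the real JPA implementation:

    // Stand-in types so the sketch compiles on its own.
    class SketchJobInstance {
        double myProgress;
    }

    interface SketchUpdateCallback {
        boolean doUpdate(SketchJobInstance theInstance); // return true to persist
    }

    class InMemoryPersistenceSketch {
        private final SketchJobInstance myStored = new SketchJobInstance();

        // Mirrors the shape of IJobPersistence.updateInstance(String, callback):
        // fetch inside the tx boundary, let the callback mutate, persist only
        // if the callback asks for it. A real impl would lock the row here.
        boolean updateInstance(String theInstanceId, SketchUpdateCallback theCallback) {
            SketchJobInstance current = myStored;
            return theCallback.doUpdate(current);
        }
    }

    class UpdateCallbackSketch {
        public static void main(String[] args) {
            InMemoryPersistenceSketch persistence = new InMemoryPersistenceSketch();
            boolean changed = persistence.updateInstance("instance-1", instance -> {
                instance.myProgress = 0.5; // all mutation inside the boundary
                return true;
            });
            System.out.println("persisted: " + changed);
        }
    }

The design point is that the caller never holds a detached copy of the instance across the write, which is what made the old fetch-then-updateInstance(instance) sequence race-prone.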
@@ -20,28 +20,29 @@
package ca.uhn.fhir.batch2.progress;

import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.Logs;
import org.slf4j.Logger;

import java.util.Optional;

public class JobInstanceStatusUpdater {
	private static final Logger ourLog = Logs.getBatchTroubleshootingLog();
	private final IJobPersistence myJobPersistence;
	private final JobDefinitionRegistry myJobDefinitionRegistry;

	public JobInstanceStatusUpdater(IJobPersistence theJobPersistence, JobDefinitionRegistry theJobDefinitionRegistry) {
		myJobPersistence = theJobPersistence;
	public JobInstanceStatusUpdater(JobDefinitionRegistry theJobDefinitionRegistry) {
		myJobDefinitionRegistry = theJobDefinitionRegistry;
	}

	/**
	 * Update the status on the instance, and call any completion handlers when entering a completion state.
	 * @param theJobInstance the instance to mutate
	 * @param theNewStatus target status
	 * @return was the state change allowed?
	 */
	public boolean updateInstanceStatus(JobInstance theJobInstance, StatusEnum theNewStatus) {
		StatusEnum origStatus = theJobInstance.getStatus();
		if (origStatus == theNewStatus) {

@@ -53,34 +54,9 @@ public class JobInstanceStatusUpdater {
		}
		theJobInstance.setStatus(theNewStatus);
		ourLog.debug("Updating job instance {} of type {} from {} to {}", theJobInstance.getInstanceId(), theJobInstance.getJobDefinitionId(), origStatus, theNewStatus);
		return updateInstance(theJobInstance);
	}
		handleStatusChange(theJobInstance);

	private boolean updateInstance(JobInstance theJobInstance) {
		Optional<JobInstance> oInstance = myJobPersistence.fetchInstance(theJobInstance.getInstanceId());
		if (oInstance.isEmpty()) {
			ourLog.error("Trying to update instance of non-existent Instance {}", theJobInstance);
			return false;
		}

		StatusEnum origStatus = oInstance.get().getStatus();
		StatusEnum newStatus = theJobInstance.getStatus();
		if (!StatusEnum.isLegalStateTransition(origStatus, newStatus)) {
			ourLog.error("Ignoring illegal state transition for job instance {} of type {} from {} to {}", theJobInstance.getInstanceId(), theJobInstance.getJobDefinitionId(), origStatus, newStatus);
			return false;
		}

		boolean statusChanged = myJobPersistence.updateInstance(theJobInstance);

		// This code can be called by both the maintenance service and the fast track work step executor.
		// We only want to call the completion handler if the status was changed to COMPLETED in this thread. We use the
		// record changed count from a sql status-update to rely on the database to tell us which thread
		// the status change happened in.
		if (statusChanged) {
			ourLog.info("Changing job instance {} of type {} from {} to {}", theJobInstance.getInstanceId(), theJobInstance.getJobDefinitionId(), origStatus, theJobInstance.getStatus());
			handleStatusChange(theJobInstance);
		}
		return statusChanged;
		return true;
	}

	private <PT extends IModelJson> void handleStatusChange(JobInstance theJobInstance) {

@@ -113,19 +89,4 @@ public class JobInstanceStatusUpdater {
		theJobCompletionHandler.jobComplete(completionDetails);
	}

	public boolean setCompleted(JobInstance theInstance) {
		return updateInstanceStatus(theInstance, StatusEnum.COMPLETED);
	}

	public boolean setInProgress(JobInstance theInstance) {
		return updateInstanceStatus(theInstance, StatusEnum.IN_PROGRESS);
	}

	public boolean setCancelled(JobInstance theInstance) {
		return updateInstanceStatus(theInstance, StatusEnum.CANCELLED);
	}

	public boolean setFailed(JobInstance theInstance) {
		return updateInstanceStatus(theInstance, StatusEnum.FAILED);
	}
}
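The removed updateInstance() helper shows the guard this commit relies on: StatusEnum.isLegalStateTransition(from, to). A toy version of that check follows; the transition table here is only an illustration assembled from the states visible in this diff and the StatusEnumTest cases below, and the real table in StatusEnum is the authority:

    import java.util.EnumSet;
    import java.util.Map;

    // Hypothetical mini state machine; the real rules live in StatusEnum.
    enum ToyStatus { QUEUED, IN_PROGRESS, ERRORED, FINALIZE, COMPLETED, FAILED, CANCELLED }

    class TransitionSketch {
        // For each target state, the states it may legally be entered from.
        static final Map<ToyStatus, EnumSet<ToyStatus>> FROM_STATES = Map.of(
            ToyStatus.QUEUED, EnumSet.noneOf(ToyStatus.class),
            ToyStatus.IN_PROGRESS, EnumSet.of(ToyStatus.QUEUED, ToyStatus.ERRORED),
            ToyStatus.ERRORED, EnumSet.of(ToyStatus.IN_PROGRESS),
            ToyStatus.FINALIZE, EnumSet.of(ToyStatus.IN_PROGRESS, ToyStatus.ERRORED),
            ToyStatus.COMPLETED, EnumSet.of(ToyStatus.IN_PROGRESS, ToyStatus.ERRORED, ToyStatus.FINALIZE),
            ToyStatus.FAILED, EnumSet.of(ToyStatus.IN_PROGRESS, ToyStatus.ERRORED, ToyStatus.FINALIZE),
            ToyStatus.CANCELLED, EnumSet.of(ToyStatus.QUEUED, ToyStatus.IN_PROGRESS, ToyStatus.ERRORED, ToyStatus.FINALIZE)
        );

        static boolean isLegalStateTransition(ToyStatus theFrom, ToyStatus theTo) {
            return FROM_STATES.get(theTo).contains(theFrom);
        }

        public static void main(String[] args) {
            System.out.println(isLegalStateTransition(ToyStatus.QUEUED, ToyStatus.IN_PROGRESS)); // true
            System.out.println(isLegalStateTransition(ToyStatus.COMPLETED, ToyStatus.QUEUED));   // false: final states stay final
        }
    }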
@@ -15,11 +15,10 @@ import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobInstanceStartRequest;
import ca.uhn.fhir.batch2.model.JobWorkNotification;
import ca.uhn.fhir.batch2.model.JobWorkNotificationJsonMessage;
import ca.uhn.fhir.batch2.model.WorkChunkCompletionEvent;
import ca.uhn.fhir.batch2.model.WorkChunkCreateEvent;
import ca.uhn.fhir.batch2.model.WorkChunkErrorEvent;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunk;
import ca.uhn.fhir.batch2.model.WorkChunkCompletionEvent;
import ca.uhn.fhir.batch2.model.WorkChunkErrorEvent;
import ca.uhn.fhir.batch2.model.WorkChunkStatusEnum;
import ca.uhn.fhir.jpa.batch.models.Batch2JobStartResponse;
import ca.uhn.fhir.jpa.dao.tx.IHapiTransactionService;

@@ -48,7 +47,7 @@ import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;

@@ -85,13 +84,18 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
	private ArgumentCaptor<JobWorkNotification> myJobWorkNotificationCaptor;
	@Captor
	private ArgumentCaptor<JobInstance> myJobInstanceCaptor;
	@Captor
	private ArgumentCaptor<JobDefinition> myJobDefinitionCaptor;
	@Captor
	private ArgumentCaptor<String> myParametersJsonCaptor;

	@BeforeEach
	public void beforeEach() {
		// The code refactored to keep the same functionality,
		// but in this service (so it's a real service here!)
		WorkChunkProcessor jobStepExecutorSvc = new WorkChunkProcessor(myJobInstancePersister, myBatchJobSender);
		mySvc = new JobCoordinatorImpl(myBatchJobSender, myWorkChannelReceiver, myJobInstancePersister, myJobDefinitionRegistry, jobStepExecutorSvc, myJobMaintenanceService);
		mySvc = new JobCoordinatorImpl(myBatchJobSender, myWorkChannelReceiver, myJobInstancePersister, myJobDefinitionRegistry, jobStepExecutorSvc, myJobMaintenanceService, myTransactionService);
	}

	@AfterEach

@@ -176,8 +180,6 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
		existingCompletedInstance.setInstanceId(completedInstanceId);

		// when
		when(myJobDefinitionRegistry.getLatestJobDefinition(eq(JOB_DEFINITION_ID)))
			.thenReturn(Optional.of(def));
		when(myJobInstancePersister.fetchInstances(any(FetchJobInstancesRequest.class), anyInt(), anyInt()))
			.thenReturn(Arrays.asList(existingInProgInstance));

@@ -198,37 +200,6 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
		);
	}

	/**
	 * If the first step doesn't produce any work chunks, then
	 * the instance should be marked as complete right away.
	 */
	@Test
	public void testPerformStep_FirstStep_NoWorkChunksProduced() {

		// Setup

		setupMocks(createJobDefinition(), createWorkChunkStep1());
		when(myStep1Worker.run(any(), any())).thenReturn(new RunOutcome(50));
		when(myJobInstancePersister.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(ourQueuedInstance));

		mySvc.start();

		// Execute

		myWorkChannelReceiver.send(new JobWorkNotificationJsonMessage(createWorkNotification(STEP_1)));

		// Verify

		verify(myStep1Worker, times(1)).run(myStep1ExecutionDetailsCaptor.capture(), any());
		TestJobParameters params = myStep1ExecutionDetailsCaptor.getValue().getParameters();
		assertEquals(PARAM_1_VALUE, params.getParam1());
		assertEquals(PARAM_2_VALUE, params.getParam2());
		assertEquals(PASSWORD_VALUE, params.getPassword());

		// QUEUED -> IN_PROGRESS and IN_PROGRESS -> COMPLETED
		verify(myJobInstancePersister, times(2)).updateInstance(any());
	}

	@Test
	public void testPerformStep_FirstStep_GatedExecutionMode() {

@@ -416,7 +387,6 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {

		String exceptionMessage = "badbadnotgood";
		when(myJobDefinitionRegistry.getJobDefinitionOrThrowException(eq(JOB_DEFINITION_ID), eq(1))).thenThrow(new JobExecutionFailedException(exceptionMessage));
		when(myJobInstancePersister.onWorkChunkDequeue(eq(CHUNK_ID))).thenReturn(Optional.of(createWorkChunkStep2()));
		mySvc.start();

		// Execute

@@ -441,7 +411,9 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
	public void testPerformStep_ChunkNotKnown() {

		// Setup

		JobDefinition jobDefinition = createJobDefinition();
		when(myJobDefinitionRegistry.getJobDefinitionOrThrowException(JOB_DEFINITION_ID, 1)).thenReturn(jobDefinition);
		when(myJobInstancePersister.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
		when(myJobInstancePersister.onWorkChunkDequeue(eq(CHUNK_ID))).thenReturn(Optional.empty());
		mySvc.start();

@@ -486,10 +458,10 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {

		// Setup

		JobDefinition<TestJobParameters> jobDefinition = createJobDefinition();
		when(myJobDefinitionRegistry.getLatestJobDefinition(eq(JOB_DEFINITION_ID)))
			.thenReturn(Optional.of(createJobDefinition()));
		when(myJobInstancePersister.storeNewInstance(any()))
			.thenReturn(INSTANCE_ID).thenReturn(INSTANCE_ID);
			.thenReturn(Optional.of(jobDefinition));
		when(myJobInstancePersister.onCreateWithFirstChunk(any(), any())).thenReturn(new IJobPersistence.CreateResult(INSTANCE_ID, CHUNK_ID));

		// Execute

@@ -501,24 +473,16 @@ public class JobCoordinatorImplTest extends BaseBatch2Test {
		// Verify

		verify(myJobInstancePersister, times(1))
			.storeNewInstance(myJobInstanceCaptor.capture());
		assertNull(myJobInstanceCaptor.getValue().getInstanceId());
		assertEquals(JOB_DEFINITION_ID, myJobInstanceCaptor.getValue().getJobDefinitionId());
		assertEquals(1, myJobInstanceCaptor.getValue().getJobDefinitionVersion());
		assertEquals(PARAM_1_VALUE, myJobInstanceCaptor.getValue().getParameters(TestJobParameters.class).getParam1());
		assertEquals(PARAM_2_VALUE, myJobInstanceCaptor.getValue().getParameters(TestJobParameters.class).getParam2());
		assertEquals(PASSWORD_VALUE, myJobInstanceCaptor.getValue().getParameters(TestJobParameters.class).getPassword());
		assertEquals(StatusEnum.QUEUED, myJobInstanceCaptor.getValue().getStatus());
			.onCreateWithFirstChunk(myJobDefinitionCaptor.capture(), myParametersJsonCaptor.capture());
		assertSame(jobDefinition, myJobDefinitionCaptor.getValue());
		assertEquals(startRequest.getParameters(), myParametersJsonCaptor.getValue());

		verify(myBatchJobSender, times(1)).sendWorkChannelMessage(myJobWorkNotificationCaptor.capture());
		assertNull(myJobWorkNotificationCaptor.getAllValues().get(0).getChunkId());
		assertEquals(CHUNK_ID, myJobWorkNotificationCaptor.getAllValues().get(0).getChunkId());
		assertEquals(JOB_DEFINITION_ID, myJobWorkNotificationCaptor.getAllValues().get(0).getJobDefinitionId());
		assertEquals(1, myJobWorkNotificationCaptor.getAllValues().get(0).getJobDefinitionVersion());
		assertEquals(STEP_1, myJobWorkNotificationCaptor.getAllValues().get(0).getTargetStepId());

		WorkChunkCreateEvent expectedWorkChunk = new WorkChunkCreateEvent(JOB_DEFINITION_ID, 1, STEP_1, INSTANCE_ID, 0, null);
		verify(myJobInstancePersister, times(1)).onWorkChunkCreate(eq(expectedWorkChunk));

		verifyNoMoreInteractions(myJobInstancePersister);
		verifyNoMoreInteractions(myStep1Worker);
		verifyNoMoreInteractions(myStep2Worker);
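The reworked test pins down the new create path: the coordinator asks persistence to create the instance and its first chunk in one call (onCreateWithFirstChunk) and then verifies the emitted WorkChunkCreateEvent with eq(), which only works if the event is a value object with a field-wise equals(). A hedged sketch of such a value object follows; the field order mirrors the constructor call in the test, but the field names and any extra detail of the real class are assumptions:

    import java.util.Objects;

    // Illustrative value object mirroring the constructor used in the test:
    // new WorkChunkCreateEvent(jobDefinitionId, version, stepId, instanceId, sequence, data)
    class WorkChunkCreateEventSketch {
        final String jobDefinitionId;
        final int jobDefinitionVersion;
        final String targetStepId;
        final String instanceId;
        final int sequence;
        final String serializedData; // null for the first, data-less chunk

        WorkChunkCreateEventSketch(String theJobDefinitionId, int theVersion, String theStepId,
                                   String theInstanceId, int theSequence, String theData) {
            jobDefinitionId = theJobDefinitionId;
            jobDefinitionVersion = theVersion;
            targetStepId = theStepId;
            instanceId = theInstanceId;
            sequence = theSequence;
            serializedData = theData;
        }

        // Field-wise equality is what lets Mockito's eq(expectedWorkChunk) match.
        @Override
        public boolean equals(Object theO) {
            if (!(theO instanceof WorkChunkCreateEventSketch)) {
                return false;
            }
            WorkChunkCreateEventSketch o = (WorkChunkCreateEventSketch) theO;
            return sequence == o.sequence
                && jobDefinitionVersion == o.jobDefinitionVersion
                && Objects.equals(jobDefinitionId, o.jobDefinitionId)
                && Objects.equals(targetStepId, o.targetStepId)
                && Objects.equals(instanceId, o.instanceId)
                && Objects.equals(serializedData, o.serializedData);
        }

        @Override
        public int hashCode() {
            return Objects.hash(jobDefinitionId, jobDefinitionVersion, targetStepId,
                instanceId, sequence, serializedData);
        }
    }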
@@ -116,7 +116,7 @@ class JobDataSinkTest {
		assertEquals(JOB_DEF_VERSION, notification.getJobDefinitionVersion());
		assertEquals(LAST_STEP_ID, notification.getTargetStepId());

		BatchWorkChunk batchWorkChunk = myBatchWorkChunkCaptor.getValue();
		WorkChunkCreateEvent batchWorkChunk = myBatchWorkChunkCaptor.getValue();
		assertEquals(JOB_DEF_VERSION, batchWorkChunk.jobDefinitionVersion);
		assertEquals(0, batchWorkChunk.sequence);
		assertEquals(JOB_DEF_ID, batchWorkChunk.jobDefinitionId);
@@ -5,10 +5,11 @@ import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
import ca.uhn.fhir.batch2.model.JobDefinition;
import ca.uhn.fhir.batch2.model.JobInstance;
import ca.uhn.fhir.batch2.model.JobWorkCursor;
import ca.uhn.fhir.batch2.model.StatusEnum;
import ca.uhn.fhir.batch2.model.WorkChunkData;
import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.model.api.IModelJson;
import ca.uhn.fhir.util.JsonUtil;
import ca.uhn.fhir.util.Logs;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.spi.ILoggingEvent;

@@ -17,21 +18,18 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.Collections;
import java.util.Optional;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

@ExtendWith(MockitoExtension.class)

@@ -47,7 +45,7 @@ public class ReductionStepDataSinkTest {

	private static class StepOutputData implements IModelJson {
		@JsonProperty("data")
		private final String myData;
		final String myData;
		public StepOutputData(String theData) {
			myData = theData;
		}

@@ -94,19 +92,16 @@ public class ReductionStepDataSinkTest {
		WorkChunkData<StepOutputData> chunkData = new WorkChunkData<>(stepData);

		// when
		when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
			.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
		JobInstance instance = JobInstance.fromInstanceId(INSTANCE_ID);
		instance.setStatus(StatusEnum.FINALIZE);
		stubUpdateInstanceCallback(instance);
		when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());

		// test
		myDataSink.accept(chunkData);

		// verify
		ArgumentCaptor<JobInstance> instanceCaptor = ArgumentCaptor.forClass(JobInstance.class);
		verify(myJobPersistence)
			.updateInstance(instanceCaptor.capture());

		assertEquals(JsonUtil.serialize(stepData, false), instanceCaptor.getValue().getReport());
		assertEquals(JsonUtil.serialize(stepData, false), instance.getReport());
	}

	@Test

@@ -119,23 +114,23 @@ public class ReductionStepDataSinkTest {

		ourLogger.setLevel(Level.ERROR);

		// when
		when(myJobPersistence.fetchInstance(eq(INSTANCE_ID)))
			.thenReturn(Optional.of(JobInstance.fromInstanceId(INSTANCE_ID)));
		JobInstance instance = JobInstance.fromInstanceId(INSTANCE_ID);
		instance.setStatus(StatusEnum.FINALIZE);
		when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());
		stubUpdateInstanceCallback(instance);

		// test
		myDataSink.accept(firstData);
		myDataSink.accept(secondData);
		assertThrows(IllegalStateException.class, ()->
			myDataSink.accept(secondData));

		// verify
		ArgumentCaptor<ILoggingEvent> logCaptor = ArgumentCaptor.forClass(ILoggingEvent.class);
		verify(myListAppender).doAppend(logCaptor.capture());
		assertEquals(1, logCaptor.getAllValues().size());
		ILoggingEvent log = logCaptor.getValue();
		assertTrue(log.getFormattedMessage().contains(
			"Report has already been set. Now it is being overwritten. Last in will win!"
		));
	}

	private void stubUpdateInstanceCallback(JobInstance theJobInstance) {
		when(myJobPersistence.updateInstance(eq(INSTANCE_ID), any())).thenAnswer(call->{
			IJobPersistence.JobInstanceUpdateCallback callback = call.getArgument(1);
			return callback.doUpdate(theJobInstance);
		});
	}

	@Test

@@ -143,10 +138,8 @@ public class ReductionStepDataSinkTest {
		// setup
		String data = "data";
		WorkChunkData<StepOutputData> chunkData = new WorkChunkData<>(new StepOutputData(data));

		// when
		when(myJobPersistence.fetchInstance(anyString()))
			.thenReturn(Optional.empty());
		when(myJobPersistence.updateInstance(any(), any())).thenReturn(false);
		when(myJobPersistence.fetchAllWorkChunksIterator(any(), anyBoolean())).thenReturn(Collections.emptyIterator());

		// test
		try {

@@ -155,7 +148,7 @@ public class ReductionStepDataSinkTest {
		} catch (JobExecutionFailedException ex) {
			assertTrue(ex.getMessage().contains("No instance found with Id " + INSTANCE_ID));
		} catch (Exception anyOtherEx) {
			fail(anyOtherEx.getMessage());
			fail("Unexpected exception", anyOtherEx);
		}
	}
|
@ -22,7 +22,6 @@ import ca.uhn.fhir.jpa.subscription.channel.api.IChannelProducer;
|
|||
import com.google.common.collect.Lists;
|
||||
import org.hl7.fhir.r4.model.DateTimeType;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Nested;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
|
@ -32,7 +31,6 @@ import org.mockito.Spy;
|
|||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.springframework.messaging.Message;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
|
@ -55,7 +53,6 @@ import static org.mockito.ArgumentMatchers.any;
|
|||
import static org.mockito.ArgumentMatchers.anyBoolean;
|
||||
import static org.mockito.ArgumentMatchers.anyInt;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoMoreInteractions;
|
||||
|
@ -75,8 +72,6 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
@Spy
|
||||
private JpaStorageSettings myStorageSettings = new JpaStorageSettings();
|
||||
private JobMaintenanceServiceImpl mySvc;
|
||||
@Captor
|
||||
private ArgumentCaptor<JobInstance> myInstanceCaptor;
|
||||
private JobDefinitionRegistry myJobDefinitionRegistry;
|
||||
@Mock
|
||||
private IChannelProducer myWorkChannelProducer;
|
||||
|
@ -103,15 +98,21 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
|
||||
@Test
|
||||
public void testInProgress_CalculateProgress_FirstCompleteButNoOtherStepsYetComplete() {
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
chunks.add(JobCoordinatorImplTest.createWorkChunk(STEP_1, null).setStatus(WorkChunkStatusEnum.COMPLETED));
|
||||
List<WorkChunk> chunks = List.of(
|
||||
JobCoordinatorImplTest.createWorkChunk(STEP_1, null).setStatus(WorkChunkStatusEnum.COMPLETED),
|
||||
JobCoordinatorImplTest.createWorkChunk(STEP_2, null).setStatus(WorkChunkStatusEnum.QUEUED)
|
||||
);
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
JobInstance instance = createInstance();
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(List.of(instance));
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
verify(myJobPersistence, never()).updateInstance(any());
|
||||
verify(myJobPersistence, times(1)).updateInstance(any(), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -125,15 +126,16 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
JobInstance instance = createInstance();
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
stubUpdateInstanceCallback(instance);
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
verify(myJobPersistence, times(1)).updateInstance(myInstanceCaptor.capture());
|
||||
JobInstance instance = myInstanceCaptor.getValue();
|
||||
verify(myJobPersistence, times(1)).updateInstance(eq(INSTANCE_ID), any());
|
||||
|
||||
assertEquals(0.5, instance.getProgress());
|
||||
assertEquals(50, instance.getCombinedRecordsProcessed());
|
||||
|
@ -146,6 +148,13 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
verifyNoMoreInteractions(myJobPersistence);
|
||||
}
|
||||
|
||||
private void stubUpdateInstanceCallback(JobInstance theJobInstance) {
|
||||
when(myJobPersistence.updateInstance(eq(INSTANCE_ID), any())).thenAnswer(call->{
|
||||
IJobPersistence.JobInstanceUpdateCallback callback = call.getArgument(1);
|
||||
return callback.doUpdate(theJobInstance);
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInProgress_CalculateProgress_InstanceHasErrorButNoChunksAreErrored() {
|
||||
// Setup
|
||||
|
@ -158,19 +167,19 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setErrorMessage("This is an error message");
|
||||
JobInstance instance = createInstance();
|
||||
instance.setErrorMessage("This is an error message");
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), eq(false)))
|
||||
.thenReturn(chunks.iterator());
|
||||
stubUpdateInstanceCallback(instance);
|
||||
|
||||
// Execute
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Verify
|
||||
verify(myJobPersistence, times(1)).updateInstance(myInstanceCaptor.capture());
|
||||
JobInstance instance = myInstanceCaptor.getValue();
|
||||
verify(myJobPersistence, times(1)).updateInstance(eq(INSTANCE_ID), any());
|
||||
|
||||
assertNull(instance.getErrorMessage());
|
||||
assertEquals(4, instance.getErrorCount());
|
||||
|
@ -185,6 +194,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
public void testInProgress_GatedExecution_FirstStepComplete() {
|
||||
// Setup
|
||||
List<WorkChunk> chunks = Arrays.asList(
|
||||
JobCoordinatorImplTest.createWorkChunkStep1().setStatus(WorkChunkStatusEnum.COMPLETED).setId(CHUNK_ID + "abc"),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID_2)
|
||||
);
|
||||
|
@ -195,19 +205,21 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
.thenReturn(chunks.iterator());
|
||||
|
||||
when(myJobPersistence.fetchAllChunkIdsForStepWithStatus(eq(INSTANCE_ID), eq(STEP_2), eq(WorkChunkStatusEnum.QUEUED)))
|
||||
.thenReturn(chunks.stream().map(chunk -> chunk.getId()).collect(Collectors.toList()));
|
||||
.thenReturn(chunks.stream().filter(c->c.getTargetStepId().equals(STEP_2)).map(WorkChunk::getId).collect(Collectors.toList()));
|
||||
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance1));
|
||||
stubUpdateInstanceCallback(instance1);
|
||||
|
||||
// Execute
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Verify
|
||||
verify(myWorkChannelProducer, times(2)).send(myMessageCaptor.capture());
|
||||
verify(myJobPersistence, times(2)).updateInstance(myInstanceCaptor.capture());
|
||||
verify(myJobPersistence, times(2)).updateInstance(eq(INSTANCE_ID), any());
|
||||
verifyNoMoreInteractions(myJobPersistence);
|
||||
JobWorkNotification payload0 = myMessageCaptor.getAllValues().get(0).getPayload();
|
||||
assertEquals(STEP_2, payload0.getTargetStepId());
|
||||
assertEquals(CHUNK_ID, payload0.getChunkId());
|
||||
|
@ -223,7 +235,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
instance.setStatus(StatusEnum.FAILED);
|
||||
instance.setEndTime(parseTime("2001-01-01T12:12:12Z"));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance));
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
|
@ -234,33 +246,20 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
@Test
|
||||
public void testInProgress_CalculateProgress_AllStepsComplete() {
|
||||
// Setup
|
||||
List<WorkChunk> chunks = new ArrayList<>();
|
||||
|
||||
chunks.add(
|
||||
createWorkChunkStep1().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:01:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:01-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:02-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:03-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
List<WorkChunk> chunks = List.of(
|
||||
createWorkChunkStep1().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:01:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:01-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:02-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:03-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25),JobCoordinatorImplTest.createWorkChunkStep3().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(t -> t.completionHandler(myCompletionHandler)));
|
||||
JobInstance instance1 = createInstance();
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
JobInstance instance = createInstance();
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
when(myJobPersistence.updateInstance(any())).thenReturn(true);
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance1));
|
||||
when(myJobPersistence.fetchInstance(INSTANCE_ID)).thenReturn(Optional.of(instance));
|
||||
stubUpdateInstanceCallback(instance);
|
||||
|
||||
// Execute
|
||||
|
||||
|
@ -268,8 +267,7 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
|
||||
// Verify
|
||||
|
||||
verify(myJobPersistence, times(2)).updateInstance(myInstanceCaptor.capture());
|
||||
JobInstance instance = myInstanceCaptor.getAllValues().get(0);
|
||||
verify(myJobPersistence, times(2)).updateInstance(eq(INSTANCE_ID), any());
|
||||
|
||||
assertEquals(1.0, instance.getProgress());
|
||||
assertEquals(StatusEnum.COMPLETED, instance.getStatus());
|
||||
|
@ -288,36 +286,25 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
|
||||
@Test
|
||||
public void testInProgress_CalculateProgress_OneStepFailed() {
|
||||
ArrayList<WorkChunk> chunks = new ArrayList<>();
|
||||
chunks.add(
|
||||
createWorkChunkStep1().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:01:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:01-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.FAILED).setStartTime(parseTime("2022-02-12T14:00:02-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25).setErrorMessage("This is an error message")
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:03-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
chunks.add(
|
||||
List<WorkChunk> chunks = List.of(
|
||||
createWorkChunkStep1().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:01:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:01-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.FAILED).setStartTime(parseTime("2022-02-12T14:00:02-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25).setErrorMessage("This is an error message"),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:03-04:00")).setEndTime(parseTime("2022-02-12T14:06:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:00:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25),
|
||||
JobCoordinatorImplTest.createWorkChunkStep3().setStatus(WorkChunkStatusEnum.COMPLETED).setStartTime(parseTime("2022-02-12T14:01:00-04:00")).setEndTime(parseTime("2022-02-12T14:10:00-04:00")).setRecordsProcessed(25)
|
||||
);
|
||||
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition());
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(createInstance()));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(createInstance()));
|
||||
JobInstance instance = createInstance();
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance));
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean()))
|
||||
.thenAnswer(t->chunks.iterator());
|
||||
stubUpdateInstanceCallback(instance);
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
verify(myJobPersistence, times(3)).updateInstance(myInstanceCaptor.capture());
|
||||
JobInstance instance = myInstanceCaptor.getAllValues().get(0);
|
||||
|
||||
assertEquals(0.8333333333333334, instance.getProgress());
|
||||
assertEquals(StatusEnum.FAILED, instance.getStatus());
|
||||
|
@ -326,79 +313,15 @@ public class JobMaintenanceServiceImplTest extends BaseBatch2Test {
|
|||
assertEquals(0.25, instance.getCombinedRecordsProcessedPerSecond());
|
||||
assertEquals(parseTime("2022-02-12T14:10:00-04:00"), instance.getEndTime());
|
||||
|
||||
// twice - once to move to FAILED, and once to purge the chunks
|
||||
verify(myJobPersistence, times(2)).updateInstance(eq(INSTANCE_ID), any());
|
||||
verify(myJobPersistence, times(1)).deleteChunksAndMarkInstanceAsChunksPurged(eq(INSTANCE_ID));
|
||||
|
||||
verifyNoMoreInteractions(myJobPersistence);
|
||||
}
|
||||
|
||||
|
||||
@Nested
|
||||
public class CancellationTests {
|
||||
|
||||
@Test
|
||||
public void afterFirstMaintenancePass() {
|
||||
// Setup
|
||||
ArrayList<WorkChunk> chunks = new ArrayList<>();
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID_2)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Execute
|
||||
instance1.setCancelled(true);
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Verify
|
||||
verify(myJobPersistence, times(2)).updateInstance(myInstanceCaptor.capture());
|
||||
assertEquals(StatusEnum.CANCELLED, instance1.getStatus());
|
||||
assertTrue(instance1.getErrorMessage().startsWith("Job instance cancelled"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void afterSecondMaintenancePass() {
|
||||
// Setup
|
||||
ArrayList<WorkChunk> chunks = new ArrayList<>();
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID)
|
||||
);
|
||||
chunks.add(
|
||||
JobCoordinatorImplTest.createWorkChunkStep2().setStatus(WorkChunkStatusEnum.QUEUED).setId(CHUNK_ID_2)
|
||||
);
|
||||
myJobDefinitionRegistry.addJobDefinition(createJobDefinition(JobDefinition.Builder::gatedExecution));
|
||||
when(myJobPersistence.fetchAllWorkChunksIterator(eq(INSTANCE_ID), anyBoolean())).thenAnswer(t->chunks.iterator());
|
||||
JobInstance instance1 = createInstance();
|
||||
instance1.setCurrentGatedStepId(STEP_1);
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Lists.newArrayList(instance1));
|
||||
when(myJobPersistence.fetchInstance(eq(INSTANCE_ID))).thenReturn(Optional.of(instance1));
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Execute
|
||||
instance1.setCancelled(true);
|
||||
|
||||
mySvc.runMaintenancePass();
|
||||
|
||||
// Verify
|
||||
assertEquals(StatusEnum.CANCELLED, instance1.getStatus());
|
||||
assertTrue(instance1.getErrorMessage().startsWith("Job instance cancelled"));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
void triggerMaintenancePass_noneInProgress_runsMaintenace() {
|
||||
void triggerMaintenancePass_noneInProgress_runsMaintenance() {
|
||||
when(myJobPersistence.fetchInstances(anyInt(), eq(0))).thenReturn(Collections.emptyList());
|
||||
mySvc.triggerMaintenancePass();
|
||||
|
||||
|
|
|
@@ -0,0 +1,26 @@
package ca.uhn.fhir.batch2.model;

import ca.uhn.fhir.test.utilities.RandomDataHelper;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.junit.jupiter.api.Test;

import java.util.Random;

import static org.junit.jupiter.api.Assertions.*;

class JobInstanceTest {

	@Test
	void testCopyConstructor_randomFieldsCopied_areEqual() {
		// given
		JobInstance instance = new JobInstance();
		RandomDataHelper.fillFieldsRandomly(instance);

		// when
		JobInstance copy = new JobInstance(instance);

		// then
		assertTrue(EqualsBuilder.reflectionEquals(instance, copy));
	}

}
@@ -14,11 +14,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
class StatusEnumTest {
	@Test
	public void testEndedStatuses() {
		assertThat(StatusEnum.getEndedStatuses(), containsInAnyOrder(StatusEnum.COMPLETED, StatusEnum.FAILED, StatusEnum.CANCELLED, StatusEnum.ERRORED));
		assertThat(StatusEnum.getEndedStatuses(), containsInAnyOrder(StatusEnum.COMPLETED, StatusEnum.FAILED, StatusEnum.CANCELLED));
	}
	@Test
	public void testNotEndedStatuses() {
		assertThat(StatusEnum.getNotEndedStatuses(), containsInAnyOrder(StatusEnum.QUEUED, StatusEnum.IN_PROGRESS, StatusEnum.FINALIZE));
		assertThat(StatusEnum.getNotEndedStatuses(), containsInAnyOrder(StatusEnum.QUEUED, StatusEnum.IN_PROGRESS, StatusEnum.ERRORED, StatusEnum.FINALIZE));
	}

	@ParameterizedTest

@@ -39,7 +39,7 @@ class StatusEnumTest {

		"COMPLETED, QUEUED, false",
		"COMPLETED, IN_PROGRESS, false",
		"COMPLETED, COMPLETED, true",
		"COMPLETED, COMPLETED, false",
		"COMPLETED, CANCELLED, false",
		"COMPLETED, ERRORED, false",
		"COMPLETED, FAILED, false",

@@ -47,7 +47,7 @@ class StatusEnumTest {
		"CANCELLED, QUEUED, false",
		"CANCELLED, IN_PROGRESS, false",
		"CANCELLED, COMPLETED, false",
		"CANCELLED, CANCELLED, true",
		"CANCELLED, CANCELLED, false",
		"CANCELLED, ERRORED, false",
		"CANCELLED, FAILED, false",

@@ -84,8 +84,7 @@ class StatusEnumTest {
	@ParameterizedTest
	@EnumSource(StatusEnum.class)
	public void testCancellableStates(StatusEnum theState) {
		assertEquals(StatusEnum.ourFromStates.get(StatusEnum.CANCELLED).contains(theState), theState.isCancellable()
			|| theState == StatusEnum.CANCELLED); // hack: isLegalStateTransition() always returns true for self-transition
		assertEquals(StatusEnum.ourFromStates.get(StatusEnum.CANCELLED).contains(theState), theState.isCancellable());
	}

	@Test
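The two reworked assertions encode the behavioral change in this commit: ERRORED moves out of the "ended" bucket into the "not ended" bucket, since an errored job can still retry and finish, and self-transitions (COMPLETED to COMPLETED, CANCELLED to CANCELLED) are no longer treated as legal. A small sketch of the status partition exactly as the updated test expects it (the enum below is a stand-in for StatusEnum, not the real class):

    import java.util.EnumSet;

    // Stand-in for StatusEnum; the sets mirror the updated test assertions.
    enum SketchStatus { QUEUED, IN_PROGRESS, ERRORED, FINALIZE, COMPLETED, FAILED, CANCELLED }

    class EndedStatusSketch {
        static final EnumSet<SketchStatus> ENDED =
            EnumSet.of(SketchStatus.COMPLETED, SketchStatus.FAILED, SketchStatus.CANCELLED);
        static final EnumSet<SketchStatus> NOT_ENDED =
            EnumSet.complementOf(ENDED); // QUEUED, IN_PROGRESS, ERRORED, FINALIZE

        public static void main(String[] args) {
            // ERRORED is retryable, so it is no longer a terminal state:
            System.out.println(NOT_ENDED.contains(SketchStatus.ERRORED)); // true
        }
    }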
@@ -2,7 +2,6 @@ package ca.uhn.fhir.batch2.progress;

import ca.uhn.fhir.batch2.api.IJobCompletionHandler;
import ca.uhn.fhir.batch2.api.IJobInstance;
import ca.uhn.fhir.batch2.api.IJobPersistence;
import ca.uhn.fhir.batch2.api.JobCompletionDetails;
import ca.uhn.fhir.batch2.coordinator.JobDefinitionRegistry;
import ca.uhn.fhir.batch2.model.JobDefinition;

@@ -17,7 +16,6 @@ import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;

import static org.junit.jupiter.api.Assertions.assertEquals;

@@ -33,8 +31,6 @@ class JobInstanceStatusUpdaterTest {
	private static final int TEST_ERROR_COUNT = 729;
	private final JobInstance myQueuedInstance = new JobInstance().setStatus(StatusEnum.QUEUED);

	@Mock
	IJobPersistence myJobPersistence;
	@Mock
	private JobDefinition<TestParameters> myJobDefinition;
	@Mock

@@ -91,8 +87,6 @@ class JobInstanceStatusUpdaterTest {

	private void setupCompleteCallback() {
		myDetails = new AtomicReference<>();
		when(myJobPersistence.fetchInstance(TEST_INSTANCE_ID)).thenReturn(Optional.of(myQueuedInstance));
		when(myJobPersistence.updateInstance(myInstance)).thenReturn(true);
		IJobCompletionHandler<TestParameters> completionHandler = details -> myDetails.set(details);
		when(myJobDefinition.getCompletionHandler()).thenReturn(completionHandler);
		when(myJobDefinition.getParametersType()).thenReturn(TestParameters.class);

@@ -102,8 +96,6 @@ class JobInstanceStatusUpdaterTest {
	public void testErrorHandler_ERROR() {
		// setup
		myDetails = new AtomicReference<>();
		when(myJobPersistence.fetchInstance(TEST_INSTANCE_ID)).thenReturn(Optional.of(myQueuedInstance));
		when(myJobPersistence.updateInstance(myInstance)).thenReturn(true);

		// execute
		mySvc.updateInstanceStatus(myInstance, StatusEnum.ERRORED);

@@ -145,8 +137,6 @@ class JobInstanceStatusUpdaterTest {
		myDetails = new AtomicReference<>();

		// setup
		when(myJobPersistence.fetchInstance(TEST_INSTANCE_ID)).thenReturn(Optional.of(myQueuedInstance));
		when(myJobPersistence.updateInstance(myInstance)).thenReturn(true);
		IJobCompletionHandler<TestParameters> errorHandler = details -> myDetails.set(details);
		when(myJobDefinition.getErrorHandler()).thenReturn(errorHandler);
		when(myJobDefinition.getParametersType()).thenReturn(TestParameters.class);
@@ -133,7 +133,7 @@ public interface IIdHelperService<T extends IResourcePersistentId> {

	Optional<String> translatePidIdToForcedIdWithCache(T theResourcePersistentId);

	PersistentIdToForcedIdMap translatePidsToForcedIds(Set<T> theResourceIds);
	PersistentIdToForcedIdMap<T> translatePidsToForcedIds(Set<T> theResourceIds);

	/**
	 * Pre-cache a PID-to-Resource-ID mapping for later retrieval by {@link #translatePidsToForcedIds(Set)} and related methods
@@ -19,6 +19,7 @@
 */
package ca.uhn.fhir.jpa.dao.tx;

import org.springframework.transaction.support.SimpleTransactionStatus;
import org.springframework.transaction.support.TransactionCallback;

import javax.annotation.Nullable;

@@ -33,6 +34,6 @@ public class NonTransactionalHapiTransactionService extends HapiTransactionServi
	@Nullable
	@Override
	protected <T> T doExecute(ExecutionBuilder theExecutionBuilder, TransactionCallback<T> theCallback) {
		return theCallback.doInTransaction(null);
		return theCallback.doInTransaction(new SimpleTransactionStatus());
	}
}
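Passing a SimpleTransactionStatus instead of null matters when a callback inspects its transaction status, for example to mark it rollback-only; with null that dereference would throw. A minimal sketch of the difference, using only plain Spring types (the surrounding HAPI service classes are elided):

    import org.springframework.transaction.support.SimpleTransactionStatus;
    import org.springframework.transaction.support.TransactionCallback;

    class NonTransactionalCallbackSketch {
        public static void main(String[] args) {
            TransactionCallback<String> callback = status -> {
                // A callback may legitimately poke its TransactionStatus; with a
                // null status this line would throw a NullPointerException.
                if (status.isNewTransaction()) {
                    status.setRollbackOnly();
                }
                return "done";
            };
            // What the fixed doExecute() now does:
            String result = callback.doInTransaction(new SimpleTransactionStatus());
            System.out.println(result);
        }
    }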
@@ -0,0 +1,56 @@
package ca.uhn.fhir.test.utilities;

import org.apache.commons.lang3.Validate;
import org.springframework.util.ReflectionUtils;

import java.lang.reflect.Modifier;
import java.util.Date;
import java.util.Random;
import java.util.UUID;

public class RandomDataHelper {
	public static void fillFieldsRandomly(Object theTarget) {
		new RandomDataHelper().fillFields(theTarget);
	}

	public void fillFields(Object theTarget) {
		ReflectionUtils.doWithFields(theTarget.getClass(), field->{
			Class<?> fieldType = field.getType();
			if (!Modifier.isFinal(field.getModifiers())) {
				ReflectionUtils.makeAccessible(field);
				Object value = generateRandomValue(fieldType);
				field.set(theTarget, value);
			}
		});
	}


	public Object generateRandomValue(Class<?> fieldType) {
		Random random = new Random();
		Object result = null;
		if (fieldType.equals(String.class)) {
			result = UUID.randomUUID().toString();
		} else if (fieldType.equals(UUID.class)) {
			result = UUID.randomUUID();
		} else if (Date.class.isAssignableFrom(fieldType)) {
			result = new Date(System.currentTimeMillis() - random.nextInt(100000000));
		} else if (fieldType.equals(Integer.TYPE)) {
			result = random.nextInt();
		} else if (fieldType.equals(Long.TYPE)) {
			result = random.nextInt();
		} else if (fieldType.equals(Long.class)) {
			result = random.nextLong();
		} else if (fieldType.equals(Double.class) || fieldType.equals(Double.TYPE)) {
			result = random.nextDouble();
		} else if (Number.class.isAssignableFrom(fieldType)) {
			result = random.nextInt(Byte.MAX_VALUE) + 1;
		} else if (Enum.class.isAssignableFrom(fieldType)) {
			Object[] enumValues = fieldType.getEnumConstants();
			result = enumValues[random.nextInt(enumValues.length)];
		} else if (fieldType.equals(Boolean.TYPE) || fieldType.equals(Boolean.class)) {
			result = random.nextBoolean();
		}
		Validate.notNull(result, "Does not support type %s", fieldType);
		return result;
	}
}
@@ -0,0 +1,40 @@
package ca.uhn.fhir.test.utilities;

import ca.uhn.fhir.model.api.TemporalPrecisionEnum;
import org.junit.jupiter.api.Test;

import java.util.Date;
import java.util.UUID;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.blankOrNullString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;

class RandomDataHelperTest {

	static class TestClass {
		String myString;
		int myInt;
		Long myBoxedLong;
		Date myDate;
		UUID myUUID;
		TemporalPrecisionEnum myEnum;
	}

	@Test
	void fillFieldsRandomly() {
		TestClass object = new TestClass();

		RandomDataHelper.fillFieldsRandomly(object);

		assertThat(object.myString, not(blankOrNullString()));
		assertThat(object.myInt, not(equalTo(0)));
		assertThat(object.myBoxedLong, notNullValue());
		assertThat(object.myDate, notNullValue());
		assertThat(object.myUUID, notNullValue());
		assertThat(object.myEnum, notNullValue());
	}

}