Normalize and update counts.
parent 6266b510d2
commit 928b80d602

@@ -38,6 +38,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceHistoryTable;
 import ca.uhn.fhir.jpa.model.entity.ResourceTable;
 import ca.uhn.fhir.jpa.partition.IRequestPartitionHelperSvc;
 import ca.uhn.fhir.jpa.search.PersistedJpaBundleProviderFactory;
+import ca.uhn.fhir.jpa.search.SearchConstants;
 import ca.uhn.fhir.jpa.util.QueryChunker;
 import ca.uhn.fhir.jpa.util.ResourceCountCache;
 import ca.uhn.fhir.rest.api.server.IBundleProvider;
@@ -59,12 +60,12 @@ import org.springframework.context.ApplicationContext;
 import org.springframework.transaction.annotation.Propagation;
 import org.springframework.transaction.annotation.Transactional;
-
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
+import javax.annotation.Nonnull;
 
 public abstract class BaseHapiFhirSystemDao<T extends IBaseBundle, MT> extends BaseStorageDao
         implements IFhirSystemDao<T, MT> {
@@ -183,11 +184,10 @@ public abstract class BaseHapiFhirSystemDao<T extends IBaseBundle, MT> extends B
      * When processing several resources (e.g. transaction bundle, $reindex chunk, etc.)
      * it would be slow to fetch each piece of a resource (e.g. all token index rows)
      * one resource at a time.
-     * Instead, we fetch all the linked resources for the entire batch here so they are present in the Hibernate Session.
+     * Instead, we fetch all the linked resources for the entire batch and populate the Hibernate Session.
      *
-     * @param theResolvedIds
+     * @param theResolvedIds the pids
      * @param thePreFetchIndexes Should resource indexes be loaded
-     * @param <P>
      */
     @Override
     public <P extends IResourcePersistentId> void preFetchResources(
@@ -195,7 +195,7 @@ public abstract class BaseHapiFhirSystemDao<T extends IBaseBundle, MT> extends B
         HapiTransactionService.requireTransaction();
         List<Long> pids = theResolvedIds.stream().map(t -> ((JpaPid) t).getId()).collect(Collectors.toList());
 
-        new QueryChunker<Long>().chunk(pids, ids -> {
+        new QueryChunker<Long>().chunk(pids, idChunk -> {
 
             /*
              * Pre-fetch the resources we're touching in this transaction in mass - this reduced the
@@ -208,114 +208,111 @@ public abstract class BaseHapiFhirSystemDao<T extends IBaseBundle, MT> extends B
              *
              * However, for realistic average workloads, this should reduce the number of round trips.
              */
-            if (ids.size() >= 2) {
-                List<ResourceTable> loadedResourceTableEntries = new ArrayList<>();
-
-                prefetchResourceTableHistoryAndProvenance(ids, loadedResourceTableEntries);
-
-                List<Long> entityIds;
+            if (idChunk.size() >= 2) {
+                List<ResourceTable> entityChunk = prefetchResourceTableHistoryAndProvenance(idChunk);
 
                 if (thePreFetchIndexes) {
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(ResourceTable::isParamsStringPopulated)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        preFetchIndexes(entityIds, "string", "myParamsString", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(ResourceTable::isParamsTokenPopulated)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        preFetchIndexes(entityIds, "token", "myParamsToken", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(ResourceTable::isParamsDatePopulated)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        preFetchIndexes(entityIds, "date", "myParamsDate", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(ResourceTable::isParamsQuantityPopulated)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        preFetchIndexes(entityIds, "quantity", "myParamsQuantity", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(ResourceTable::isHasLinks)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        preFetchIndexes(entityIds, "resourceLinks", "myResourceLinks", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .filter(BaseHasResource::isHasTags)
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
-                    if (entityIds.size() > 0) {
-                        myResourceTagDao.findByResourceIds(entityIds);
-                        preFetchIndexes(entityIds, "tags", "myTags", null);
-                    }
-
-                    entityIds = loadedResourceTableEntries.stream()
-                            .map(ResourceTable::getId)
-                            .collect(Collectors.toList());
+                    prefetchByField("string", "myParamsString", ResourceTable::isParamsStringPopulated, entityChunk);
+                    prefetchByField("token", "myParamsToken", ResourceTable::isParamsTokenPopulated, entityChunk);
+                    prefetchByField("date", "myParamsDate", ResourceTable::isParamsDatePopulated, entityChunk);
+                    prefetchByField(
+                            "quantity", "myParamsQuantity", ResourceTable::isParamsQuantityPopulated, entityChunk);
+                    prefetchByField("resourceLinks", "myResourceLinks", ResourceTable::isHasLinks, entityChunk);
+
+                    prefetchByJoinClause(
+                            "tags",
+                            // fetch the TagResources and the actual TagDefinitions
+                            "LEFT JOIN FETCH r.myTags t LEFT JOIN FETCH t.myTag",
+                            BaseHasResource::isHasTags,
+                            entityChunk);
+
                     if (myStorageSettings.getIndexMissingFields() == JpaStorageSettings.IndexEnabledEnum.ENABLED) {
-                        preFetchIndexes(entityIds, "searchParamPresence", "mySearchParamPresents", null);
+                        prefetchByField("searchParamPresence", "mySearchParamPresents", r -> true, entityChunk);
                     }
                 }
             }
         });
     }
 
-    private void prefetchResourceTableHistoryAndProvenance(
-            List<Long> ids, List<ResourceTable> loadedResourceTableEntries) {
-        new QueryChunker<Long>().chunk(ids, nextChunk -> {
-            Query query = myEntityManager.createQuery("select r, h "
-                    + " FROM ResourceTable r "
-                    + " LEFT JOIN fetch ResourceHistoryTable h "
-                    + " on r.myVersion = h.myResourceVersion and r.id = h.myResourceId "
-                    + " left join fetch h.myProvenance "
-                    + " WHERE r.myId IN ( :IDS ) ");
-            query.setParameter("IDS", ids);
-
-            @SuppressWarnings("unchecked")
-            List<Object[]> allById = query.getResultList();
-
-            for (Object[] nextPair : allById) {
-                ResourceTable r = (ResourceTable) nextPair[0];
-                ResourceHistoryTable h = (ResourceHistoryTable) nextPair[1];
-                // history is a big weird - we hold it in a transient field because we also hold the new version.
-                r.setCurrentVersionEntity(h);
-                loadedResourceTableEntries.add(r);
-            }
-        });
-    }
+    @Nonnull
+    private List<ResourceTable> prefetchResourceTableHistoryAndProvenance(List<Long> idChunk) {
+        assert idChunk.size() < SearchConstants.MAX_PAGE_SIZE : "assume pre-chunked";
+
+        Query query = myEntityManager.createQuery("select r, h "
+                + " FROM ResourceTable r "
+                + " LEFT JOIN fetch ResourceHistoryTable h "
+                + " on r.myVersion = h.myResourceVersion and r.id = h.myResourceId "
+                + " left join fetch h.myProvenance "
+                + " WHERE r.myId IN ( :IDS ) ");
+        query.setParameter("IDS", idChunk);
+
+        @SuppressWarnings("unchecked")
+        List<Object[]> queryResults = query.getResultList();
+
+        return queryResults.stream()
+                .map(nextPair -> {
+                    // Store the matching ResourceHistoryTable in the transient slot on ResourceTable
+                    ResourceTable result = (ResourceTable) nextPair[0];
+                    ResourceHistoryTable currentVersion = (ResourceHistoryTable) nextPair[1];
+                    result.setCurrentVersionEntity(currentVersion);
+                    return result;
+                })
+                .collect(Collectors.toList());
+    }
 
-    private void preFetchIndexes(
-            List<Long> theIds,
-            String typeDesc,
-            String fieldName,
-            @Nullable List<ResourceTable> theEntityListToPopulate) {
-        new QueryChunker<Long>().chunk(theIds, ids -> {
-            TypedQuery<ResourceTable> query = myEntityManager.createQuery(
-                    "FROM ResourceTable r LEFT JOIN FETCH r." + fieldName + " WHERE r.myId IN ( :IDS )",
-                    ResourceTable.class);
-            query.setParameter("IDS", ids);
-            List<ResourceTable> indexFetchOutcome = query.getResultList();
-            ourLog.debug("Pre-fetched {} {}} indexes", indexFetchOutcome.size(), typeDesc);
-            if (theEntityListToPopulate != null) {
-                theEntityListToPopulate.addAll(indexFetchOutcome);
-            }
-        });
-    }
+    /**
+     * Prefetch a join field for the active subset of some ResourceTable entities.
+     * Convenience wrapper around prefetchByJoinClause() for simple fields.
+     *
+     * @param theDescription for logging
+     * @param theJpaFieldName the field to join
+     * @param theEntityPredicate select which ResourceTable entities need this join
+     * @param theEntities the ResourceTable entities to consider
+     */
+    private void prefetchByField(
+            String theDescription,
+            String theJpaFieldName,
+            java.util.function.Predicate<ResourceTable> theEntityPredicate,
+            List<ResourceTable> theEntities) {
+
+        String joinClause = "LEFT JOIN FETCH r." + theJpaFieldName;
+
+        prefetchByJoinClause(theDescription, joinClause, theEntityPredicate, theEntities);
+    }
+
+    /**
+     * Prefetch a join field for the active subset of some ResourceTable entities.
+     *
+     * @param theDescription for logging
+     * @param theJoinClause the JPA join expression to add to `ResourceTable r`
+     * @param theEntityPredicate selects which entities need this prefetch
+     * @param theEntities the ResourceTable entities to consider
+     */
+    private void prefetchByJoinClause(
+            String theDescription,
+            String theJoinClause,
+            java.util.function.Predicate<ResourceTable> theEntityPredicate,
+            List<ResourceTable> theEntities) {
+
+        // Which entities need this prefetch?
+        List<Long> idSubset = theEntities.stream()
+                .filter(theEntityPredicate)
+                .map(ResourceTable::getId)
+                .collect(Collectors.toList());
+
+        if (idSubset.isEmpty()) {
+            // nothing to do
+            return;
+        }
+
+        String jqlQuery = "FROM ResourceTable r " + theJoinClause + " WHERE r.myId IN ( :IDS )";
+
+        TypedQuery<ResourceTable> query = myEntityManager.createQuery(jqlQuery, ResourceTable.class);
+        query.setParameter("IDS", idSubset);
+        List<ResourceTable> indexFetchOutcome = query.getResultList();
+
+        ourLog.debug("Pre-fetched {} {} indexes", indexFetchOutcome.size(), theDescription);
+    }
 
     @Nullable
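
The net effect of the hunk above: six hand-rolled "filter ids, collect, fetch" blocks collapse into two small helpers, and prefetchResourceTableHistoryAndProvenance now returns its entities instead of filling a caller-supplied list. For orientation, here is a minimal, self-contained sketch of the generic prefetch pattern the new helpers implement. It is illustrative only: the generic entity type, its `myId` field, and the EntityManager wiring are assumptions for the sketch, not the actual HAPI FHIR classes (those appear in the diff itself).

```java
import jakarta.persistence.EntityManager; // javax.persistence on older JPA levels
import jakarta.persistence.TypedQuery;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Sketch: hydrate one lazy association for a whole chunk of entities with a
// single IN-list query, but only for the entities that actually have data.
final class PrefetchSketch<E> {

    private final EntityManager myEntityManager;
    private final Class<E> myEntityType;
    private final Function<E, Long> myIdAccessor;

    PrefetchSketch(EntityManager theEntityManager, Class<E> theEntityType, Function<E, Long> theIdAccessor) {
        myEntityManager = theEntityManager;
        myEntityType = theEntityType;
        myIdAccessor = theIdAccessor;
    }

    void prefetchByJoinClause(String theJoinClause, Predicate<E> theNeedsPrefetch, List<E> theEntities) {
        // Only the subset of entities whose flag says the association is populated
        List<Long> idSubset = theEntities.stream()
                .filter(theNeedsPrefetch)
                .map(myIdAccessor)
                .collect(Collectors.toList());

        if (idSubset.isEmpty()) {
            return; // nothing to fetch - skip the database round trip entirely
        }

        // Assumes the entity exposes its primary key as "myId", as ResourceTable does
        String jpql = "FROM " + myEntityType.getSimpleName() + " r " + theJoinClause + " WHERE r.myId IN ( :IDS )";
        TypedQuery<E> query = myEntityManager.createQuery(jpql, myEntityType);
        query.setParameter("IDS", idSubset);

        // The result list is discarded: the point is the side effect of loading
        // the joined rows into the persistence context, so later lazy accesses
        // do not each issue their own SELECT.
        query.getResultList();
    }
}
```

With a one-line wrapper for simple fields, each of the old blocks becomes a call like `prefetch.prefetchByJoinClause("LEFT JOIN FETCH r.myParamsToken", ResourceTable::isParamsTokenPopulated, entityChunk)`, which is why the test files below each expect fewer SELECTs.
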
@@ -3,7 +3,6 @@ package ca.uhn.fhir.jpa.bulk.imprt2;
 import ca.uhn.fhir.batch2.api.JobExecutionFailedException;
 import ca.uhn.fhir.batch2.jobs.imprt.ConsumeFilesStep;
 import ca.uhn.fhir.interceptor.model.RequestPartitionId;
-import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
 import ca.uhn.fhir.jpa.dao.r4.BasePartitioningR4Test;
 import org.hl7.fhir.instance.model.api.IBaseResource;
 import org.hl7.fhir.r4.model.IdType;
@@ -84,7 +83,7 @@ public class ConsumeFilesStepR4Test extends BasePartitioningR4Test {
 
         // Validate
 
-        assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countInsertQueriesForCurrentThread(), myCaptureQueriesListener.getInsertQueriesForCurrentThread().stream().map(t->t.getSql(true, false)).collect(Collectors.joining("\n")));
         assertEquals(0, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueriesForCurrentThread());
@@ -145,9 +144,9 @@ public class ConsumeFilesStepR4Test extends BasePartitioningR4Test {
         // Validate
 
         if (partitionEnabled) {
-            assertEquals(8, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+            assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         } else {
-            assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+            assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         }
         assertEquals(2, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(4, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());

@@ -149,7 +149,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
     @Autowired
     private ISubscriptionTriggeringSvc mySubscriptionTriggeringSvc;
     @Autowired
-    private ResourceModifiedSubmitterSvc myResourceModifiedSubmitterSvc;;
+    private ResourceModifiedSubmitterSvc myResourceModifiedSubmitterSvc;
     @Autowired
     private ReindexStep myReindexStep;
     @Autowired
@@ -1927,7 +1927,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
         outcome = mySystemDao.transaction(mySrd, input.get());
         ourLog.debug("Resp: {}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(outcome));
         myCaptureQueriesListener.logSelectQueries();
-        assertEquals(8, myCaptureQueriesListener.countSelectQueries());
+        assertEquals(7, myCaptureQueriesListener.countSelectQueries());
         myCaptureQueriesListener.logInsertQueries();
         assertEquals(7, myCaptureQueriesListener.countInsertQueries());
         myCaptureQueriesListener.logUpdateQueries();
@@ -1943,7 +1943,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
         outcome = mySystemDao.transaction(mySrd, input.get());
         ourLog.debug("Resp: {}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(outcome));
         myCaptureQueriesListener.logSelectQueries();
-        assertEquals(6, myCaptureQueriesListener.countSelectQueries());
+        assertEquals(5, myCaptureQueriesListener.countSelectQueries());
         myCaptureQueriesListener.logInsertQueries();
         assertEquals(5, myCaptureQueriesListener.countInsertQueries());
         myCaptureQueriesListener.logUpdateQueries();
@@ -3365,7 +3365,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
 
         myCaptureQueriesListener.clear();
         Bundle outcome = mySystemDao.transaction(new SystemRequestDetails(), supplier.get());
-        assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         myCaptureQueriesListener.logInsertQueries();
         assertEquals(4, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(6, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
@@ -3388,7 +3388,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
 
         myCaptureQueriesListener.clear();
         outcome = mySystemDao.transaction(new SystemRequestDetails(), supplier.get());
-        assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         myCaptureQueriesListener.logInsertQueries();
         assertEquals(4, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(6, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
@@ -3449,7 +3449,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseResourceProviderR4Test
         myCaptureQueriesListener.clear();
         mySystemDao.transaction(new SystemRequestDetails(), loadResourceFromClasspath(Bundle.class, "r4/transaction-perf-bundle-smallchanges.json"));
         myCaptureQueriesListener.logSelectQueriesForCurrentThread();
-        assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         assertEquals(2, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(5, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueriesForCurrentThread());

@@ -2842,7 +2842,7 @@ public class PartitioningSqlR4Test extends BasePartitioningR4Test {
         outcome = mySystemDao.transaction(mySrd, input.get());
         ourLog.debug("Resp: {}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(outcome));
         myCaptureQueriesListener.logSelectQueriesForCurrentThread();
-        assertEquals(9, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(8, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         myCaptureQueriesListener.logInsertQueriesForCurrentThread();
         assertEquals(4, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         myCaptureQueriesListener.logUpdateQueriesForCurrentThread();
@@ -2859,7 +2859,7 @@ public class PartitioningSqlR4Test extends BasePartitioningR4Test {
         outcome = mySystemDao.transaction(mySrd, input.get());
         ourLog.debug("Resp: {}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(outcome));
         myCaptureQueriesListener.logSelectQueriesForCurrentThread();
-        assertEquals(8, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(7, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         myCaptureQueriesListener.logInsertQueriesForCurrentThread();
         assertEquals(4, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         myCaptureQueriesListener.logUpdateQueriesForCurrentThread();
@@ -2874,7 +2874,7 @@ public class PartitioningSqlR4Test extends BasePartitioningR4Test {
         outcome = mySystemDao.transaction(mySrd, input.get());
         ourLog.debug("Resp: {}", myFhirContext.newJsonParser().setPrettyPrint(true).encodeResourceToString(outcome));
         myCaptureQueriesListener.logSelectQueriesForCurrentThread();
-        assertEquals(6, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(5, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         myCaptureQueriesListener.logInsertQueriesForCurrentThread();
         assertEquals(4, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         myCaptureQueriesListener.logUpdateQueriesForCurrentThread();
@@ -2924,7 +2924,7 @@ public class PartitioningSqlR4Test extends BasePartitioningR4Test {
         output = mySystemDao.transaction(requestDetails, input);
         myCaptureQueriesListener.logSelectQueries();
 
-        assertEquals(29, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(26, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueriesForCurrentThread());

@@ -64,7 +64,7 @@ public class ReindexStepTest extends BaseJpaR4Test {
 
         // Verify
         assertEquals(2, outcome.getRecordsProcessed());
-        assertEquals(6, myCaptureQueriesListener.logSelectQueries().size());
+        assertEquals(5, myCaptureQueriesListener.logSelectQueries().size());
         assertEquals(0, myCaptureQueriesListener.countInsertQueries());
         myCaptureQueriesListener.logUpdateQueries();
         assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
@@ -95,7 +95,7 @@ public class ReindexStepTest extends BaseJpaR4Test {
 
         // Verify
         assertEquals(2, outcome.getRecordsProcessed());
-        assertEquals(8, myCaptureQueriesListener.logSelectQueries().size());
+        assertEquals(7, myCaptureQueriesListener.logSelectQueries().size());
         assertEquals(0, myCaptureQueriesListener.countInsertQueries());
         assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
@@ -128,7 +128,7 @@ public class ReindexStepTest extends BaseJpaR4Test {
 
         // Verify
         assertEquals(2, outcome.getRecordsProcessed());
-        assertEquals(6, myCaptureQueriesListener.logSelectQueries().size());
+        assertEquals(5, myCaptureQueriesListener.logSelectQueries().size());
         // name, family, phonetic, deceased, active
         assertEquals(5, myCaptureQueriesListener.countInsertQueries());
         assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
@@ -196,7 +196,7 @@ public class ReindexStepTest extends BaseJpaR4Test {
 
         // Verify
         assertEquals(2, outcome.getRecordsProcessed());
-        assertEquals(10, myCaptureQueriesListener.logSelectQueries().size());
+        assertEquals(9, myCaptureQueriesListener.logSelectQueries().size());
         assertEquals(0, myCaptureQueriesListener.countInsertQueries());
         assertEquals(4, myCaptureQueriesListener.countUpdateQueries());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
@@ -241,7 +241,7 @@ public class ReindexStepTest extends BaseJpaR4Test {
 
         // Verify
         assertEquals(4, outcome.getRecordsProcessed());
-        assertEquals(9, myCaptureQueriesListener.logSelectQueries().size());
+        assertEquals(8, myCaptureQueriesListener.logSelectQueries().size());
         assertEquals(5, myCaptureQueriesListener.countInsertQueries());
         assertEquals(2, myCaptureQueriesListener.countUpdateQueries());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueries());

@@ -4,6 +4,7 @@ import ca.uhn.fhir.context.FhirContext;
 import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
 import ca.uhn.fhir.rest.server.exceptions.PreconditionFailedException;
 import ca.uhn.fhir.util.BundleBuilder;
+import jakarta.annotation.Nonnull;
 import org.hl7.fhir.r5.model.BooleanType;
 import org.hl7.fhir.r5.model.Bundle;
 import org.hl7.fhir.r5.model.CodeType;
@@ -20,7 +21,6 @@ import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.CsvSource;
 import org.junit.jupiter.params.provider.ValueSource;
 
-import jakarta.annotation.Nonnull;
 import java.io.IOException;
 import java.util.UUID;
 
@@ -28,7 +28,6 @@ import static org.apache.commons.lang3.StringUtils.countMatches;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
-import static org.hamcrest.Matchers.in;
 import static org.hamcrest.Matchers.matchesPattern;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -151,7 +150,7 @@ public class FhirSystemDaoTransactionR5Test extends BaseJpaR5Test {
 
         // Verify
 
-        assertEquals(theMatchUrlCacheEnabled ? 4 : 5, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(theMatchUrlCacheEnabled ? 3 : 4, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueriesForCurrentThread());
@@ -203,7 +202,7 @@ public class FhirSystemDaoTransactionR5Test extends BaseJpaR5Test {
 
         // Verify
 
-        assertEquals(theMatchUrlCacheEnabled ? 4 : 5, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(theMatchUrlCacheEnabled ? 3 : 4, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countInsertQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countUpdateQueriesForCurrentThread());
         assertEquals(0, myCaptureQueriesListener.countDeleteQueriesForCurrentThread());

@@ -401,7 +401,7 @@ public class UpliftedRefchainsAndChainedSortingR5Test extends BaseJpaR5Test {
 
         // 1- Resolve resource forced IDs, and 2- Resolve Practitioner/PR1 reference
         myCaptureQueriesListener.logSelectQueriesForCurrentThread();
-        assertEquals(10, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
+        assertEquals(9, myCaptureQueriesListener.countSelectQueriesForCurrentThread());
 
         // Verify correct indexes are written
 
@@ -441,7 +441,7 @@ public class UpliftedRefchainsAndChainedSortingR5Test extends BaseJpaR5Test {
         bb.addTransactionUpdateEntry(newEncounter(ENCOUNTER_E2, p2Id));
         bb.addTransactionCreateEntry(newPatientP1_HomerSimpson().setId(p1Id)).conditional("identifier=http://system|200");
         bb.addTransactionCreateEntry(newPatientP2_MargeSimpson().setId(p2Id)).conditional("identifier=http://system|300");
-        ;
 
         Bundle requestBundle = bb.getBundleTyped();
 
         myCaptureQueriesListener.clear();
@@ -496,7 +496,7 @@ public class UpliftedRefchainsAndChainedSortingR5Test extends BaseJpaR5Test {
         bb.addTransactionUpdateEntry(newEncounter(ENCOUNTER_E2, p2Id));
         bb.addTransactionCreateEntry(new Patient().addIdentifier(new Identifier().setSystem("http://system").setValue("200")).setId(p1Id)).conditional("identifier=http://system|200");
         bb.addTransactionCreateEntry(new Patient().addIdentifier(new Identifier().setSystem("http://system").setValue("300")).setId(p2Id)).conditional("identifier=http://system|300");
-        ;
 
         Bundle requestBundle = bb.getBundleTyped();
 
         myCaptureQueriesListener.clear();
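
The test hunks above are the "update counts" half of the commit: consolidating the prefetch into fewer IN-list queries drops one to three SELECTs per scenario, so each pinned count moves down accordingly. For readers unfamiliar with the pattern, here is a hedged, self-contained sketch of how such query-count regression tests work in general. It uses Hibernate's built-in Statistics API as a stand-in for HAPI's own test fixture (the `myCaptureQueriesListener` shown in the diff); the class and workload names are hypothetical.

```java
import jakarta.persistence.EntityManagerFactory;
import org.hibernate.SessionFactory;
import org.hibernate.stat.Statistics;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;

// Hypothetical sketch of a query-count regression test. The
// EntityManagerFactory is assumed to be provided by the test harness.
class QueryCountSketchTest {

    EntityManagerFactory myEntityManagerFactory; // assumed injected elsewhere

    @Test
    void workloadIssuesExpectedNumberOfStatements() {
        Statistics stats = myEntityManagerFactory.unwrap(SessionFactory.class).getStatistics();
        stats.setStatisticsEnabled(true);
        stats.clear(); // analogous to myCaptureQueriesListener.clear() in the diff

        runWorkloadUnderTest(); // the transaction/reindex step being pinned

        // Pinning the exact statement count makes a lost prefetch (one extra
        // SELECT per resource) fail loudly instead of regressing silently.
        assertEquals(6, stats.getPrepareStatementCount());
    }

    private void runWorkloadUnderTest() {
        // placeholder for the code under test
    }
}
```
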