Merge branch 'master' into rel_5_4

jamesagnew 2021-05-07 17:44:47 -04:00
commit 176be4df16
14 changed files with 635 additions and 116 deletions

View File

@ -0,0 +1,7 @@
---
type: fix
issue: 2641
title: "A vulnerability in the FHIR History operation was resolved. When running HAPI FHIR JPA server on a large database (i.e. containing
a large number of resources), if a malicious user performs a large number of concurrent FHIR History (`_history`) operations,
an expensive `COUNT()` statement can consume all available database resources and ultimately trigger resource exhaustion and
disable the server. A huge thanks to **Zachary Minneker at Security Innovation** who discovered and submitted a responsible disclosure of this issue."

View File

@ -1,6 +1,19 @@
# Performance
This page contains information for performance optimization.
This page contains information on performance optimization. If you are planning a production deployment, you should consider the options discussed here, as they may significantly impact your ability to scale.
# History Counting
The FHIR history operation allows clients to see a change history for a single resource, for all resources of a given type, or even for all resources on the server. This operation includes a total count (in `Bundle.total`) that can be very expensive to calculate on large databases with many resources.
As a result, a new setting called **History Count Mode** has been added to the `DaoConfig` object. This setting has three possible options, and can be configured as shown in the example after this list:
* COUNT_CACHED: The new default. For history requests without any dates specified, a loading cache is used, meaning that counts are stored in RAM for up to one minute and only one client thread per JVM actually performs the count while any others block. This effectively throttles access to the database. History operation invocations that include a `_since` or `_to` parameter will never have a count included in the results.
* COUNT_ACCURATE: This option always issues a fresh count statement for every history invocation, so the count total in the History bundle is guaranteed to be accurate every time. Note that this allows users to trigger a large number of potentially expensive database operations simply by performing many history operations. Do not use this option in situations where untrusted users can access your server.
* COUNT_DISABLED: This setting skips the count query entirely, saving time and eliminating any risk of expensive count queries, at the expense of not including any total in the response.
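The mode is set on the `DaoConfig` at startup. The following minimal sketch shows all three options (the wrapper class is illustrative only; note that the enum constant `CACHED_ONLY_WITHOUT_OFFSET` is the COUNT_CACHED behaviour described above):

```java
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;

public class HistoryCountConfig {

	public static DaoConfig newDaoConfig() {
		DaoConfig daoConfig = new DaoConfig();

		// The default: cached counts for requests with no date range, no counts otherwise
		daoConfig.setHistoryCountMode(HistoryCountModeEnum.CACHED_ONLY_WITHOUT_OFFSET);

		// Always compute a fresh, accurate total (avoid where untrusted users have access):
		// daoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);

		// Never include Bundle.total in history responses:
		// daoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_DISABLED);

		return daoConfig;
	}
}
```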
# Bulk Loading

View File

@ -1,5 +1,6 @@
package ca.uhn.fhir.jpa.api.config;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.api.model.WarmCacheEntry;
import ca.uhn.fhir.jpa.model.entity.ModelConfig;
import ca.uhn.fhir.jpa.model.entity.ResourceEncodingEnum;
@ -74,6 +75,7 @@ public class DaoConfig {
)));
// update setter javadoc if default changes
public static final int DEFAULT_MAX_EXPANSION_SIZE = 1000;
public static final HistoryCountModeEnum DEFAULT_HISTORY_COUNT_MODE = HistoryCountModeEnum.CACHED_ONLY_WITHOUT_OFFSET;
/**
* Default value for {@link #setMaximumSearchResultCountInTransaction(Integer)}
*
@ -84,13 +86,18 @@ public class DaoConfig {
private static final Logger ourLog = LoggerFactory.getLogger(DaoConfig.class);
private static final int DEFAULT_EXPUNGE_BATCH_SIZE = 800;
private static final int DEFAULT_MAXIMUM_DELETE_CONFLICT_COUNT = 60;
private IndexEnabledEnum myIndexMissingFieldsEnabled = IndexEnabledEnum.DISABLED;
/**
* Child Configurations
*/
private final ModelConfig myModelConfig = new ModelConfig();
/**
* Do not change default of {@code 0}!
*
* @since 4.1.0
*/
private final int myPreExpandValueSetsDefaultOffset = 0;
private IndexEnabledEnum myIndexMissingFieldsEnabled = IndexEnabledEnum.DISABLED;
/**
* update setter javadoc if default changes
*/
@ -117,7 +124,6 @@ public class DaoConfig {
private Integer myFetchSizeDefaultMaximum = null;
private int myMaximumExpansionSize = DEFAULT_MAX_EXPANSION_SIZE;
private Integer myMaximumSearchResultCountInTransaction = DEFAULT_MAXIMUM_SEARCH_RESULT_COUNT_IN_TRANSACTION;
private Integer myMaximumTransactionBundleSize = DEFAULT_MAXIMUM_TRANSACTION_BUNDLE_SIZE;
private ResourceEncodingEnum myResourceEncoding = ResourceEncodingEnum.JSONC;
/**
@ -148,6 +154,8 @@ public class DaoConfig {
private ClientIdStrategyEnum myResourceClientIdStrategy = ClientIdStrategyEnum.ALPHANUMERIC;
private boolean myFilterParameterEnabled = false;
private StoreMetaSourceInformationEnum myStoreMetaSourceInformation = StoreMetaSourceInformationEnum.SOURCE_URI_AND_REQUEST_ID;
private HistoryCountModeEnum myHistoryCountMode = DEFAULT_HISTORY_COUNT_MODE;
/**
* update setter javadoc if default changes
*/
@ -158,12 +166,6 @@ public class DaoConfig {
* @since 4.1.0
*/
private boolean myPreExpandValueSets = true;
/**
* Do not change default of {@code 0}!
*
* @since 4.1.0
*/
private final int myPreExpandValueSetsDefaultOffset = 0;
/**
* Do not change default of {@code 1000}!
*
@ -176,14 +178,12 @@ public class DaoConfig {
* @since 4.1.0
*/
private int myPreExpandValueSetsMaxCount = 1000;
/**
* Do not change default of {@code true}!
*
* @since 4.2.0
*/
private boolean myPopulateIdentifierInAutoCreatedPlaceholderReferenceTargets = true;
/**
* @since 5.0.0
*/
@ -221,6 +221,52 @@ public class DaoConfig {
}
}
/**
* When performing a FHIR history operation, a <code>Bundle.total</code> value is included in the
* response, indicating the total number of history entries. This total is calculated using a
* SQL COUNT statement, which can be expensive. This setting allows the results of the count
* query to be cached, resulting in a much lighter load on the server, at the expense of
* returning total values that may be slightly out of date. Total counts can also be disabled,
* or forced to always be accurate.
* <p>
* In {@link HistoryCountModeEnum#CACHED_ONLY_WITHOUT_OFFSET} mode, a loading cache is used to fetch the value,
* meaning that only one thread per JVM will fetch the count, and others will block while waiting
* for the cache to load, avoiding excessive load on the database.
* </p>
* <p>
* Default is {@link HistoryCountModeEnum#CACHED_ONLY_WITHOUT_OFFSET}
* </p>
*
* @since 5.4.0
*/
public HistoryCountModeEnum getHistoryCountMode() {
return myHistoryCountMode;
}
/**
* When performing a FHIR history operation, a <code>Bundle.total</code> value is included in the
* response, indicating the total number of history entries. This total is calculated using a
* SQL COUNT statement, which can be expensive. This setting allows the results of the count
* query to be cached, resulting in a much lighter load on the server, at the expense of
* returning total values that may be slightly out of date. Total counts can also be disabled,
* or forced to always be accurate.
* <p>
* In {@link HistoryCountModeEnum#CACHED_ONLY_WITHOUT_OFFSET} mode, a loading cache is used to fetch the value,
* meaning that only one thread per JVM will fetch the count, and others will block while waiting
* for the cache to load, avoiding excessive load on the database.
* </p>
* <p>
* Default is {@link HistoryCountModeEnum#CACHED_ONLY_WITHOUT_OFFSET}
* </p>
*
* @since 5.4.0
*/
public void setHistoryCountMode(@Nonnull HistoryCountModeEnum theHistoryCountMode) {
Validate.notNull(theHistoryCountMode, "theHistoryCountMode must not be null");
myHistoryCountMode = theHistoryCountMode;
}
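// Usage note (illustrative; not part of the original source): because the setter rejects
// null via Validate.notNull, callers wishing to suppress totals must pass COUNT_DISABLED
// explicitly, e.g. daoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_DISABLED);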
/**
* If set to <code>true</code> (default is <code>false</code>) the <code>$lastn</code> operation will be enabled for
* indexing Observation resources. This operation involves creating a special set of tables in ElasticSearch for
@ -257,6 +303,18 @@ public class DaoConfig {
return myUseLegacySearchBuilder;
}
/**
* This method controls whether to use the new non-hibernate search SQL builder that was introduced in HAPI FHIR 5.2.0.
* By default this will be <code>false</code>, meaning that the new SQL builder is used. Set to <code>true</code> to use the
* legacy SQL builder based on Hibernate.
* <p>Note that this method will be removed in HAPI FHIR 5.4.0</p>
*
* @since 5.3.0
*/
public void setUseLegacySearchBuilder(boolean theUseLegacySearchBuilder) {
myUseLegacySearchBuilder = theUseLegacySearchBuilder;
}
/**
* Specifies the duration in minutes for which values will be retained after being
* written to the terminology translation cache. Defaults to 60.
@ -271,21 +329,7 @@ public class DaoConfig {
* cached in an in-memory cache. This cache can have a noticeable improvement on write performance on servers
* where conditional operations are frequently performed, but note that this cache will not be
* invalidated based on updates to resources so this may have detrimental effects.
*
* Default is <code>false</code>
*
* @since 5.4.0
*/
public void setMatchUrlCache(boolean theMatchUrlCache) {
myMatchUrlCache = theMatchUrlCache;
}
/**
* If enabled, resolutions for match URLs (e.g. conditional create URLs, conditional update URLs, etc) will be
* cached in an in-memory cache. This cache can have a noticeable improvement on write performance on servers
* where conditional operations are frequently performed, but note that this cache will not be
* invalidated based on updates to resources so this may have detrimental effects.
*
* <p>
* Default is <code>false</code>
*
* @since 5.4.0
@ -295,15 +339,17 @@ public class DaoConfig {
}
/**
* This method controls whether to use the new non-hibernate search SQL builder that was introduced in HAPI FHIR 5.2.0.
* By default this will be <code>false</code>, meaning that the new SQL builder is used. Set to <code>true</code> to use the
* legacy SQL builder based on Hibernate.
* <p>Note that this method will be removed in HAPI FHIR 5.4.0</p>
* If enabled, resolutions for match URLs (e.g. conditional create URLs, conditional update URLs, etc) will be
* cached in an in-memory cache. This cache can have a noticeable improvement on write performance on servers
* where conditional operations are frequently performed, but note that this cache will not be
* invalidated based on updates to resources so this may have detrimental effects.
* <p>
* Default is <code>false</code>
*
* @since 5.3.0
* @since 5.4.0
*/
public void setUseLegacySearchBuilder(boolean theUseLegacySearchBuilder) {
myUseLegacySearchBuilder = theUseLegacySearchBuilder;
public void setMatchUrlCache(boolean theMatchUrlCache) {
myMatchUrlCache = theMatchUrlCache;
}
/**
@ -652,7 +698,7 @@ public class DaoConfig {
* to not index missing field.
* </p>
* <p>
* The following index may need to be added to indexed tables such as <code>HFJ_SPIDX_TOKEN</code>
* to improve the search performance while <code>:missing</code> is enabled.
* <code>RES_TYPE, SP_NAME, SP_MISSING</code>
* </p>
@ -1079,7 +1125,7 @@ public class DaoConfig {
* This property can be useful in cases where replication between two servers is wanted.
* Note however that references containing purely numeric IDs will not be auto-created
* as they are never allowed to be client supplied in HAPI FHIR JPA.
*
* <p>
* All placeholder resources created in this way have an extension
* with the URL {@link HapiExtensions#EXT_RESOURCE_PLACEHOLDER} and the value "true".
* </p>
@ -1103,7 +1149,7 @@ public class DaoConfig {
* This property can be useful in cases where replication between two servers is wanted.
* Note however that references containing purely numeric IDs will not be auto-created
* as they are never allowed to be client supplied in HAPI FHIR JPA.
*
* <p>
* All placeholder resources created in this way have an extension
* with the URL {@link HapiExtensions#EXT_RESOURCE_PLACEHOLDER} and the value "true".
* </p>
@ -1386,11 +1432,8 @@ public class DaoConfig {
}
/**
* If set to <code>true</code> (default is <code>false</code>), the _expunge parameter on the DELETE
* operation will be enabled on this server. DELETE _expunge removes all data associated with a resource in a highly performant
* way, skipping most of the checks that are enforced with usual DELETE operations. The only check
* that is performed before deleting the resources and their indexes is that no other resources reference the resources about to
* be deleted. This operation is potentially dangerous since it allows
* If set to <code>true</code> (default is <code>false</code>), the $expunge operation
* will be enabled on this server. This operation is potentially dangerous since it allows
* a client to physically delete data in a way that can not be recovered (without resorting
* to backups).
* <p>
@ -1399,8 +1442,8 @@ public class DaoConfig {
* operation.
* </p>
*/
public void setDeleteExpungeEnabled(boolean theDeleteExpungeEnabled) {
myDeleteExpungeEnabled = theDeleteExpungeEnabled;
public void setExpungeEnabled(boolean theExpungeEnabled) {
myExpungeEnabled = theExpungeEnabled;
}
/**
@ -1422,8 +1465,11 @@ public class DaoConfig {
}
/**
* If set to <code>true</code> (default is <code>false</code>), the $expunge operation
* will be enabled on this server. This operation is potentially dangerous since it allows
* If set to <code>true</code> (default is <code>false</code>), the _expunge parameter on the DELETE
* operation will be enabled on this server. DELETE _expunge removes all data associated with a resource in a highly performant
* way, skipping most of the checks that are enforced with usual DELETE operations. The only check
* that is performed before deleting the resources and their indexes is that no other resources reference the resources about to
* be deleted. This operation is potentially dangerous since it allows
* a client to physically delete data in a way that can not be recovered (without resorting
* to backups).
* <p>
@ -1432,8 +1478,8 @@ public class DaoConfig {
* operation.
* </p>
*/
public void setExpungeEnabled(boolean theExpungeEnabled) {
myExpungeEnabled = theExpungeEnabled;
public void setDeleteExpungeEnabled(boolean theDeleteExpungeEnabled) {
myDeleteExpungeEnabled = theDeleteExpungeEnabled;
}
/**
@ -1749,6 +1795,7 @@ public class DaoConfig {
* <p>
* Default is <code>false</code>
* </p>
*
* @since 5.4.0
*/
public boolean isAllowMdmExpansion() {
@ -1764,6 +1811,7 @@ public class DaoConfig {
* <p>
* Default is <code>false</code>
* </p>
*
* @since 5.4.0
*/
public void setAllowMdmExpansion(boolean theAllowMdmExpansion) {

View File

@ -0,0 +1,22 @@
package ca.uhn.fhir.jpa.api.model;
public enum HistoryCountModeEnum {
/**
* Always include an accurate count in the response
*/
COUNT_ACCURATE,
/**
* For history invocations with no offset (i.e. no since-date specified), always include a count in the response,
* but serve it from a cache, meaning the count may be slightly out of date (while resource usage is much lower). For
* history invocations with an offset, never return a count.
*/
CACHED_ONLY_WITHOUT_OFFSET,
/**
* Do not include a count in history responses
*/
COUNT_DISABLED
}

View File

@ -25,6 +25,7 @@ import ca.uhn.fhir.interceptor.api.HookParams;
import ca.uhn.fhir.interceptor.api.IInterceptorBroadcaster;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.interceptor.model.RequestPartitionId;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
import ca.uhn.fhir.jpa.api.svc.ISearchCoordinatorSvc;
@ -34,14 +35,14 @@ import ca.uhn.fhir.jpa.dao.ISearchBuilder;
import ca.uhn.fhir.jpa.dao.SearchBuilderFactory;
import ca.uhn.fhir.jpa.entity.Search;
import ca.uhn.fhir.jpa.entity.SearchTypeEnum;
import ca.uhn.fhir.jpa.search.cache.SearchCacheStatusEnum;
import ca.uhn.fhir.rest.api.server.storage.ResourcePersistentId;
import ca.uhn.fhir.jpa.model.entity.BaseHasResource;
import ca.uhn.fhir.jpa.model.entity.ResourceHistoryTable;
import ca.uhn.fhir.jpa.partition.RequestPartitionHelperSvc;
import ca.uhn.fhir.jpa.search.cache.ISearchCacheSvc;
import ca.uhn.fhir.jpa.search.cache.SearchCacheStatusEnum;
import ca.uhn.fhir.jpa.util.InterceptorUtil;
import ca.uhn.fhir.jpa.util.JpaInterceptorBroadcaster;
import ca.uhn.fhir.jpa.util.MemoryCacheService;
import ca.uhn.fhir.model.primitive.InstantDt;
import ca.uhn.fhir.rest.api.server.IBundleProvider;
import ca.uhn.fhir.rest.api.server.IPreResourceAccessDetails;
@ -49,6 +50,7 @@ import ca.uhn.fhir.rest.api.server.IPreResourceShowDetails;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.api.server.SimplePreResourceAccessDetails;
import ca.uhn.fhir.rest.api.server.SimplePreResourceShowDetails;
import ca.uhn.fhir.rest.api.server.storage.ResourcePersistentId;
import ca.uhn.fhir.rest.server.servlet.ServletRequestDetails;
import com.google.common.annotations.VisibleForTesting;
import org.hl7.fhir.instance.model.api.IBaseResource;
@ -70,6 +72,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
public class PersistedJpaBundleProvider implements IBundleProvider {
@ -78,7 +81,9 @@ public class PersistedJpaBundleProvider implements IBundleProvider {
/*
* Autowired fields
*/
private final RequestDetails myRequest;
@Autowired
protected PlatformTransactionManager myTxManager;
@PersistenceContext
private EntityManager myEntityManager;
@Autowired
@ -90,8 +95,6 @@ public class PersistedJpaBundleProvider implements IBundleProvider {
@Autowired
private DaoRegistry myDaoRegistry;
@Autowired
protected PlatformTransactionManager myTxManager;
@Autowired
private FhirContext myContext;
@Autowired
private ISearchCoordinatorSvc mySearchCoordinatorSvc;
@ -99,13 +102,15 @@ public class PersistedJpaBundleProvider implements IBundleProvider {
private ISearchCacheSvc mySearchCacheSvc;
@Autowired
private RequestPartitionHelperSvc myRequestPartitionHelperSvc;
@Autowired
private DaoConfig myDaoConfig;
/*
* Non-autowired fields (will be different for every instance
* of this class, since it's a prototype)
*/
private final RequestDetails myRequest;
@Autowired
private MemoryCacheService myMemoryCacheService;
private Search mySearchEntity;
private String myUuid;
private SearchCacheStatusEnum myCacheStatus;
@ -241,17 +246,57 @@ public class PersistedJpaBundleProvider implements IBundleProvider {
if (mySearchEntity.getSearchType() == SearchTypeEnum.HISTORY) {
if (mySearchEntity.getTotalCount() == null) {
new TransactionTemplate(myTxManager).executeWithoutResult(t->{
HistoryBuilder historyBuilder = myHistoryBuilderFactory.newHistoryBuilder(mySearchEntity.getResourceType(), mySearchEntity.getResourceId(), mySearchEntity.getLastUpdatedLow(), mySearchEntity.getLastUpdatedHigh());
Long count = historyBuilder.fetchCount(getRequestPartitionId());
mySearchEntity.setTotalCount(count.intValue());
});
calculateHistoryCount();
}
}
return true;
}
/**
* Note that this method is called outside a DB transaction, and uses a loading cache
* (assuming the default {@literal COUNT_CACHED} mode) so this effectively throttles
* access to the database by preventing multiple concurrent DB calls for an expensive
* count operation.
*/
private void calculateHistoryCount() {
MemoryCacheService.HistoryCountKey key;
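// Select a cache key whose scope matches the request: a single resource instance,
// all resources of one type, or the whole system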
if (mySearchEntity.getResourceId() != null) {
key = MemoryCacheService.HistoryCountKey.forInstance(mySearchEntity.getResourceId());
} else if (mySearchEntity.getResourceType() != null) {
key = MemoryCacheService.HistoryCountKey.forType(mySearchEntity.getResourceType());
} else {
key = MemoryCacheService.HistoryCountKey.forSystem();
}
Function<MemoryCacheService.HistoryCountKey, Integer> supplier = k -> new TransactionTemplate(myTxManager).execute(t -> {
HistoryBuilder historyBuilder = myHistoryBuilderFactory.newHistoryBuilder(mySearchEntity.getResourceType(), mySearchEntity.getResourceId(), mySearchEntity.getLastUpdatedLow(), mySearchEntity.getLastUpdatedHigh());
Long count = historyBuilder.fetchCount(getRequestPartitionId());
return count.intValue();
});
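// Requests constrained by a date range never reuse a cached total; in
// CACHED_ONLY_WITHOUT_OFFSET mode the count is skipped entirely for such requests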
boolean haveOffset = mySearchEntity.getLastUpdatedLow() != null || mySearchEntity.getLastUpdatedHigh() != null;
switch (myDaoConfig.getHistoryCountMode()) {
case COUNT_ACCURATE: {
int count = supplier.apply(key);
mySearchEntity.setTotalCount(count);
break;
}
case CACHED_ONLY_WITHOUT_OFFSET: {
if (!haveOffset) {
int count = myMemoryCacheService.get(MemoryCacheService.CacheEnum.HISTORY_COUNT, key, supplier);
mySearchEntity.setTotalCount(count);
}
break;
}
case COUNT_DISABLED: {
break;
}
}
}
@Override
public InstantDt getPublished() {
ensureSearchEntityLoaded();

View File

@ -28,6 +28,7 @@ import org.hl7.fhir.dstu3.model.InstantType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@ -192,6 +193,7 @@ public class CircularQueueCaptureQueriesListener extends BaseCaptureQueriesListe
/**
* Log all captured SELECT queries
*
* @return The logged queries, formatted as a single string
*/
public String logSelectQueriesForCurrentThread(int... theIndexes) {
@ -217,15 +219,23 @@ public class CircularQueueCaptureQueriesListener extends BaseCaptureQueriesListe
* Log all captured SELECT queries
*/
public List<SqlQuery> logSelectQueries() {
return logSelectQueries(true, true);
}
/**
* Log all captured SELECT queries
*/
public List<SqlQuery> logSelectQueries(boolean theInlineParams, boolean theFormatSql) {
List<SqlQuery> queries = getSelectQueries();
List<String> queriesStrings = queries
.stream()
.map(CircularQueueCaptureQueriesListener::formatQueryAsSql)
.map(t -> CircularQueueCaptureQueriesListener.formatQueryAsSql(t, theInlineParams, theFormatSql))
.collect(Collectors.toList());
ourLog.info("Select Queries:\n{}", String.join("\n", queriesStrings));
return queries;
}
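// Example (as exercised by JpaHistoryR4Test elsewhere in this commit):
//   myCaptureQueriesListener.logSelectQueries(false, false);
// logs the raw SQL with parameter placeholders and without pretty-printing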
/**
* Log first captured SELECT query
*/
@ -353,8 +363,16 @@ public class CircularQueueCaptureQueriesListener extends BaseCaptureQueriesListe
}
@Nonnull
static String formatQueryAsSql(SqlQuery theQuery) {
String formattedSql = theQuery.getSql(true, true);
boolean inlineParams = true;
boolean formatSql = true;
return formatQueryAsSql(theQuery, inlineParams, formatSql);
}
@Nonnull
static String formatQueryAsSql(SqlQuery theQuery, boolean inlineParams, boolean formatSql) {
String formattedSql = theQuery.getSql(inlineParams, formatSql);
StringBuilder b = new StringBuilder();
b.append("SqlQuery at ");
b.append(new InstantType(new Date(theQuery.getQueryTimestamp())).getValueAsString());

View File

@ -27,16 +27,18 @@ import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.tuple.Pair;
import org.hl7.fhir.instance.model.api.IIdType;
import org.springframework.beans.factory.annotation.Autowired;
import javax.annotation.Nonnull;
import javax.annotation.PostConstruct;
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
/**
* This class acts as a central spot for all of the many Caffeine caches we use in HAPI FHIR.
* <p>
@ -120,7 +122,8 @@ public class MemoryCacheService {
CONCEPT_TRANSLATION(TranslationQuery.class),
MATCH_URL(String.class),
CONCEPT_TRANSLATION_REVERSE(TranslationQuery.class),
RESOURCE_CONDITIONAL_CREATE_VERSION(IIdType.class);
RESOURCE_CONDITIONAL_CREATE_VERSION(IIdType.class),
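// Caches history operation COUNT(*) totals, keyed by scope (instance, type, or whole
// system); see PersistedJpaBundleProvider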
HISTORY_COUNT(HistoryCountKey.class);
private final Class<?> myKeyType;
@ -137,6 +140,17 @@ public class MemoryCacheService {
private final String myCode;
private final int myHashCode;
public TagDefinitionCacheKey(TagTypeEnum theType, String theSystem, String theCode) {
myType = theType;
mySystem = theSystem;
myCode = theCode;
myHashCode = new HashCodeBuilder(17, 37)
.append(myType)
.append(mySystem)
.append(myCode)
.toHashCode();
}
@Override
public boolean equals(Object theO) {
boolean retVal = false;
@ -156,17 +170,49 @@ public class MemoryCacheService {
public int hashCode() {
return myHashCode;
}
}
public TagDefinitionCacheKey(TagTypeEnum theType, String theSystem, String theCode) {
myType = theType;
mySystem = theSystem;
myCode = theCode;
myHashCode = new HashCodeBuilder(17, 37)
.append(myType)
.append(mySystem)
.append(myCode)
.toHashCode();
public static class HistoryCountKey {
private final String myTypeName;
private final Long myInstanceId;
private final int myHashCode;
private HistoryCountKey(String theTypeName, Long theInstanceId) {
myTypeName = theTypeName;
myInstanceId = theInstanceId;
myHashCode = new HashCodeBuilder().append(myTypeName).append(myInstanceId).toHashCode();
}
@Override
public boolean equals(Object theO) {
boolean retVal = false;
if (theO instanceof HistoryCountKey) {
HistoryCountKey that = (HistoryCountKey) theO;
retVal = new EqualsBuilder().append(myTypeName, that.myTypeName).append(myInstanceId, that.myInstanceId).isEquals();
}
return retVal;
}
@Override
public int hashCode() {
return myHashCode;
}
public static HistoryCountKey forSystem() {
return new HistoryCountKey(null, null);
}
public static HistoryCountKey forType(@Nonnull String theType) {
assert isNotBlank(theType);
return new HistoryCountKey(theType, null);
}
public static HistoryCountKey forInstance(@Nonnull Long theInstanceId) {
assert theInstanceId != null;
return new HistoryCountKey(null, theInstanceId);
}
}
}

View File

@ -1,6 +1,7 @@
package ca.uhn.fhir.jpa.dao.dstu2;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.dao.BaseHapiFhirDao;
import ca.uhn.fhir.jpa.dao.BaseHapiFhirResourceDao;
import ca.uhn.fhir.jpa.dao.data.IForcedIdDao;
@ -120,6 +121,7 @@ public class FhirResourceDaoDstu2Test extends BaseJpaDstu2Test {
myDaoConfig.setAllowExternalReferences(new DaoConfig().isAllowExternalReferences());
myDaoConfig.setTreatReferencesAsLogical(new DaoConfig().getTreatReferencesAsLogical());
myDaoConfig.setEnforceReferentialIntegrityOnDelete(new DaoConfig().isEnforceReferentialIntegrityOnDelete());
myDaoConfig.setHistoryCountMode(DaoConfig.DEFAULT_HISTORY_COUNT_MODE);
}
private void assertGone(IIdType theId) {
@ -651,6 +653,8 @@ public class FhirResourceDaoDstu2Test extends BaseJpaDstu2Test {
@Test
public void testDeleteResource() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
int initialHistory = myPatientDao.history(null, null, mySrd).size();
IIdType id1;
@ -694,7 +698,7 @@ public class FhirResourceDaoDstu2Test extends BaseJpaDstu2Test {
}
IBundleProvider history = myPatientDao.history(null, null, mySrd);
assertEquals(4 + initialHistory, history.size().intValue());
assertEquals(4 + initialHistory, history.sizeOrThrowNpe());
List<IBaseResource> resources = history.getResources(0, 4);
assertNotNull(ResourceMetadataKeyEnum.DELETED_AT.get((IResource) resources.get(0)));
@ -1036,6 +1040,7 @@ public class FhirResourceDaoDstu2Test extends BaseJpaDstu2Test {
@Test
public void testHistoryOverMultiplePages() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
String methodName = "testHistoryOverMultiplePages";
/*

View File

@ -2,6 +2,7 @@ package ca.uhn.fhir.jpa.dao.dstu3;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.dao.BaseHapiFhirDao;
import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamString;
import ca.uhn.fhir.jpa.model.entity.TagTypeEnum;
@ -126,6 +127,7 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
myDaoConfig.setAllowExternalReferences(new DaoConfig().isAllowExternalReferences());
myDaoConfig.setTreatReferencesAsLogical(new DaoConfig().getTreatReferencesAsLogical());
myDaoConfig.setIndexMissingFields(new DaoConfig().getIndexMissingFields());
myDaoConfig.setHistoryCountMode(DaoConfig.DEFAULT_HISTORY_COUNT_MODE);
}
private void assertGone(IIdType theId) {
@ -895,6 +897,8 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
@Test
public void testDeleteResource() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
int initialHistory = myPatientDao.history(null, null, mySrd).size();
IIdType id1;
@ -1282,6 +1286,7 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
@Test
public void testHistoryOverMultiplePages() throws Exception {
myDaoConfig.setIndexMissingFields(DaoConfig.IndexEnabledEnum.DISABLED);
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
String methodName = "testHistoryOverMultiplePages";
@ -1432,7 +1437,9 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
}
@Test
public void testHistoryReflectsMetaOperations() throws Exception {
public void testHistoryReflectsMetaOperations() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
Patient inPatient = new Patient();
inPatient.addName().setFamily("version1");
inPatient.getMeta().addProfile("http://example.com/1");
@ -1517,6 +1524,8 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
@Test
public void testHistoryWithFromAndTo() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
String methodName = "testHistoryWithFromAndTo";
Patient patient = new Patient();
@ -1548,6 +1557,7 @@ public class FhirResourceDaoDstu3Test extends BaseJpaDstu3Test {
@Test
public void testHistoryWithFutureSinceDate() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
Date before = new Date();
Thread.sleep(10);

View File

@ -1,6 +1,7 @@
package ca.uhn.fhir.jpa.dao.r4;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.model.util.JpaConstants;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
import ca.uhn.fhir.jpa.util.SqlQuery;
@ -46,6 +47,7 @@ public class FhirResourceDaoR4QueryCountTest extends BaseJpaR4Test {
myDaoConfig.setIndexMissingFields(new DaoConfig().getIndexMissingFields());
myDaoConfig.setDeleteEnabled(new DaoConfig().isDeleteEnabled());
myDaoConfig.setMatchUrlCache(new DaoConfig().getMatchUrlCache());
myDaoConfig.setHistoryCountMode(DaoConfig.DEFAULT_HISTORY_COUNT_MODE);
}
@BeforeEach
@ -401,6 +403,8 @@ public class FhirResourceDaoR4QueryCountTest extends BaseJpaR4Test {
@Test
public void testHistory_Server() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
runInTransaction(() -> {
Patient p = new Patient();
p.setId("A");
@ -457,6 +461,8 @@ public class FhirResourceDaoR4QueryCountTest extends BaseJpaR4Test {
*/
@Test
public void testHistory_Server_WithTags() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
runInTransaction(() -> {
Patient p = new Patient();
p.getMeta().addTag("system", "code1", "displaY1");

View File

@ -3,6 +3,7 @@ package ca.uhn.fhir.jpa.dao.r4;
import ca.uhn.fhir.interceptor.model.RequestPartitionId;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDao;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.dao.BaseHapiFhirDao;
import ca.uhn.fhir.jpa.dao.BaseHapiFhirResourceDao;
import ca.uhn.fhir.jpa.dao.JpaResourceDao;
@ -14,6 +15,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamString;
import ca.uhn.fhir.jpa.model.entity.ResourceTable;
import ca.uhn.fhir.jpa.model.entity.TagTypeEnum;
import ca.uhn.fhir.jpa.model.search.SearchStatusEnum;
import ca.uhn.fhir.jpa.model.util.UcumServiceUtil;
import ca.uhn.fhir.jpa.search.SearchCoordinatorSvcImpl;
import ca.uhn.fhir.jpa.searchparam.SearchParamConstants;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
@ -43,8 +45,6 @@ import ca.uhn.fhir.rest.server.exceptions.ResourceGoneException;
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
import ca.uhn.fhir.rest.server.exceptions.ResourceVersionConflictException;
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
import ca.uhn.fhir.jpa.model.util.UcumServiceUtil;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import org.apache.commons.io.IOUtils;
@ -131,7 +131,6 @@ import java.util.concurrent.Future;
import static org.apache.commons.lang3.StringUtils.countMatches;
import static org.apache.commons.lang3.StringUtils.defaultString;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
@ -163,8 +162,9 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
myDaoConfig.setEnforceReferentialIntegrityOnDelete(new DaoConfig().isEnforceReferentialIntegrityOnDelete());
myDaoConfig.setEnforceReferenceTargetTypes(new DaoConfig().isEnforceReferenceTargetTypes());
myDaoConfig.setIndexMissingFields(new DaoConfig().getIndexMissingFields());
myModelConfig.setNormalizedQuantitySearchLevel(NormalizedQuantitySearchLevel.NORMALIZED_QUANTITY_SEARCH_NOT_SUPPORTED);
}
myModelConfig.setNormalizedQuantitySearchLevel(NormalizedQuantitySearchLevel.NORMALIZED_QUANTITY_SEARCH_NOT_SUPPORTED);
myDaoConfig.setHistoryCountMode(DaoConfig.DEFAULT_HISTORY_COUNT_MODE);
}
@BeforeEach
public void before() {
@ -673,9 +673,9 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
IBundleProvider found = myObservationDao.search(new SearchParameterMap(Observation.SP_VALUE_QUANTITY, new QuantityParam("ne123", UcumServiceUtil.UCUM_CODESYSTEM_URL, "cm")).setLoadSynchronous(true));
assertEquals(0, found.size().intValue());
}
}
@Test
public void testChoiceParamQuantityPrecision() {
Observation o3 = new Observation();
@ -754,9 +754,9 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
List<IIdType> list = toUnqualifiedVersionlessIds(found);
assertThat(list, Matchers.empty());
}
}
@Test
public void testChoiceParamString() {
@ -1370,6 +1370,8 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
@Test
public void testDeleteResource() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
int initialHistory = myPatientDao.history(null, null, mySrd).size();
IIdType id1;
@ -1559,7 +1561,7 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
myObservationDao.read(obs1id);
myObservationDao.read(obs2id);
}
@Test
public void testDeleteWithMatchUrl() {
String methodName = "testDeleteWithMatchUrl";
@ -1845,6 +1847,8 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
@Test
public void testHistoryOverMultiplePages() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
String methodName = "testHistoryOverMultiplePages";
Patient patient = new Patient();
@ -1995,6 +1999,8 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
@Test
public void testHistoryReflectsMetaOperations() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
Patient inPatient = new Patient();
inPatient.addName().setFamily("version1");
inPatient.getMeta().addProfile("http://example.com/1");
@ -2079,6 +2085,8 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
@Test
public void testHistoryWithFromAndTo() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
String methodName = "testHistoryWithFromAndTo";
Patient patient = new Patient();
@ -2110,6 +2118,7 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
@Test
public void testHistoryWithFutureSinceDate() throws Exception {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
Date before = new Date();
Thread.sleep(10);
@ -3158,10 +3167,10 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
assertTrue(next.getResource().getIdElement().hasIdPart());
}
}
@Test()
public void testSortByComposite() {
IIdType pid0;
IIdType oid1;
IIdType oid2;
@ -3179,55 +3188,55 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
obs.getSubject().setReferenceElement(pid0);
obs.getCode().addCoding().setCode("2345-7").setSystem("http://loinc.org");
obs.setValue(new StringType("200"));
oid1 = myObservationDao.create(obs, mySrd).getId().toUnqualifiedVersionless();
ourLog.info("Observation: \n" + myFhirCtx.newJsonParser().setPrettyPrint(true).encodeResourceToString(obs));
}
{
Observation obs = new Observation();
obs.addIdentifier().setSystem("urn:system").setValue("FOO");
obs.getSubject().setReferenceElement(pid0);
obs.getCode().addCoding().setCode("2345-7").setSystem("http://loinc.org");
obs.setValue(new StringType("300"));
oid2 = myObservationDao.create(obs, mySrd).getId().toUnqualifiedVersionless();
ourLog.info("Observation: \n" + myFhirCtx.newJsonParser().setPrettyPrint(true).encodeResourceToString(obs));
}
{
Observation obs = new Observation();
obs.addIdentifier().setSystem("urn:system").setValue("FOO");
obs.getSubject().setReferenceElement(pid0);
obs.getCode().addCoding().setCode("2345-7").setSystem("http://loinc.org");
obs.setValue(new StringType("150"));
oid3 = myObservationDao.create(obs, mySrd).getId().toUnqualifiedVersionless();
ourLog.info("Observation: \n" + myFhirCtx.newJsonParser().setPrettyPrint(true).encodeResourceToString(obs));
}
{
Observation obs = new Observation();
obs.addIdentifier().setSystem("urn:system").setValue("FOO");
obs.getSubject().setReferenceElement(pid0);
obs.getCode().addCoding().setCode("2345-7").setSystem("http://loinc.org");
obs.setValue(new StringType("250"));
oid4 = myObservationDao.create(obs, mySrd).getId().toUnqualifiedVersionless();
ourLog.info("Observation: \n" + myFhirCtx.newJsonParser().setPrettyPrint(true).encodeResourceToString(obs));
}
SearchParameterMap pm = new SearchParameterMap();
pm.setSort(new SortSpec(Observation.SP_CODE_VALUE_STRING));
IBundleProvider found = myObservationDao.search(pm);
List<IIdType> list = toUnqualifiedVersionlessIds(found);
assertEquals(4, list.size());
assertEquals(oid3, list.get(0));
@ -3350,20 +3359,20 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
IIdType id2 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
p = new Patient();
p.setId(methodName+"1");
p.setId(methodName + "1");
p.addIdentifier().setSystem("urn:system").setValue(methodName);
IIdType idMethodName1 = myPatientDao.update(p, mySrd).getId().toUnqualifiedVersionless();
assertEquals(methodName+"1", idMethodName1.getIdPart());
assertEquals(methodName + "1", idMethodName1.getIdPart());
p = new Patient();
p.addIdentifier().setSystem("urn:system").setValue(methodName);
IIdType id3 = myPatientDao.create(p, mySrd).getId().toUnqualifiedVersionless();
p = new Patient();
p.setId(methodName+"2");
p.setId(methodName + "2");
p.addIdentifier().setSystem("urn:system").setValue(methodName);
IIdType idMethodName2 = myPatientDao.update(p, mySrd).getId().toUnqualifiedVersionless();
assertEquals(methodName+"2", idMethodName2.getIdPart());
assertEquals(methodName + "2", idMethodName2.getIdPart());
p = new Patient();
p.addIdentifier().setSystem("urn:system").setValue(methodName);
@ -3533,7 +3542,7 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
assertThat(actual, contains(id4, id3, id2, id1));
}
@Test
@Disabled
public void testSortByQuantityWithNormalizedQuantitySearchFullSupported() {
@ -4015,15 +4024,15 @@ public class FhirResourceDaoR4Test extends BaseJpaR4Test {
published = (ArrayList<Coding>) retrieved.getMeta().getTag();
sort(published);
assertEquals(3, published.size());
assertEquals( "Dog", published.get(0).getCode());
assertEquals( "Puppies", published.get(0).getDisplay());
assertEquals( null, published.get(0).getSystem());
assertEquals( "Cat", published.get(1).getCode());
assertEquals( "Kittens", published.get(1).getDisplay());
assertEquals( "http://foo", published.get(1).getSystem());
assertEquals( "Cow", published.get(2).getCode());
assertEquals( "Calves", published.get(2).getDisplay());
assertEquals( "http://foo", published.get(2).getSystem());
assertEquals("Dog", published.get(0).getCode());
assertEquals("Puppies", published.get(0).getDisplay());
assertEquals(null, published.get(0).getSystem());
assertEquals("Cat", published.get(1).getCode());
assertEquals("Kittens", published.get(1).getDisplay());
assertEquals("http://foo", published.get(1).getSystem());
assertEquals("Cow", published.get(2).getCode());
assertEquals("Calves", published.get(2).getDisplay());
assertEquals("http://foo", published.get(2).getSystem());
secLabels = retrieved.getMeta().getSecurity();
sortCodings(secLabels);

View File

@ -9,6 +9,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamString;
import ca.uhn.fhir.jpa.model.entity.ResourceTable;
import ca.uhn.fhir.jpa.model.entity.ResourceTag;
import ca.uhn.fhir.jpa.model.entity.TagTypeEnum;
import ca.uhn.fhir.jpa.partition.SystemRequestDetails;
import ca.uhn.fhir.jpa.provider.SystemProviderDstu2Test;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
import ca.uhn.fhir.model.api.ResourceMetadataKeyEnum;
@ -26,6 +27,7 @@ import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
import ca.uhn.fhir.rest.server.exceptions.ResourceVersionConflictException;
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
import ca.uhn.fhir.util.BundleBuilder;
import ca.uhn.fhir.util.ClasspathUtil;
import org.apache.commons.io.IOUtils;
import org.hamcrest.Matchers;
import org.hl7.fhir.instance.model.api.IAnyResource;
@ -464,7 +466,7 @@ public class FhirSystemDaoR4Test extends BaseJpaR4SystemTest {
*/
@Test
public void testContainedArePreservedForBug410() throws IOException {
String input = IOUtils.toString(getClass().getResourceAsStream("/r4/bug-410-bundle.xml"), StandardCharsets.UTF_8);
String input = ClasspathUtil.loadResource("/r4/bug-410-bundle.xml");
Bundle bundle = myFhirCtx.newXmlParser().parseResource(Bundle.class, input);
Bundle output = mySystemDao.transaction(mySrd, bundle);

View File

@ -0,0 +1,286 @@
package ca.uhn.fhir.jpa.dao.r4;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.partition.SystemRequestDetails;
import ca.uhn.fhir.rest.api.server.IBundleProvider;
import ca.uhn.fhir.util.BundleBuilder;
import ca.uhn.fhir.util.StopWatch;
import org.hl7.fhir.r4.model.Bundle;
import org.hl7.fhir.r4.model.Patient;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import static ca.uhn.fhir.jpa.util.TestUtil.sleepAtLeast;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class JpaHistoryR4Test extends BaseJpaR4SystemTest {
private static final Logger ourLog = LoggerFactory.getLogger(JpaHistoryR4Test.class);
@AfterEach
public void after() {
myDaoConfig.setHistoryCountMode(DaoConfig.DEFAULT_HISTORY_COUNT_MODE);
}
@Test
public void testTypeHistory_TotalDisabled() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_DISABLED);
create20Patients();
/*
* Perform initial history
*/
myCaptureQueriesListener.clear();
IBundleProvider history = myPatientDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(null, history.size());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
// Resource query happens but not count query
assertEquals(1, myCaptureQueriesListener.countSelectQueries());
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), not(startsWith("select count")));
}
@Test
public void testTypeHistory_CountAccurate() {
myDaoConfig.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
create20Patients();
/*
* Perform initial history
*/
myCaptureQueriesListener.clear();
IBundleProvider history = myPatientDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(2, myCaptureQueriesListener.countSelectQueries());
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), startsWith("select count"));
assertThat(myCaptureQueriesListener.getSelectQueries().get(1).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
/*
* Subsequent history should also perform count
*/
myCaptureQueriesListener.clear();
history = myPatientDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(2, myCaptureQueriesListener.countSelectQueries());
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), startsWith("select count"));
assertThat(myCaptureQueriesListener.getSelectQueries().get(1).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
}
@Test
public void testTypeHistory_CountCacheEnabled() {
create20Patients();
/*
* Perform initial history
*/
myCaptureQueriesListener.clear();
IBundleProvider history = myPatientDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(2, myCaptureQueriesListener.countSelectQueries());
myCaptureQueriesListener.logSelectQueries(false, false);
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), startsWith("select count"));
assertThat(myCaptureQueriesListener.getSelectQueries().get(1).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
runInTransaction(() -> assertEquals(0, mySearchEntityDao.count()));
/*
* Perform history a second time (no count should be performed)
*/
myCaptureQueriesListener.clear();
history = myPatientDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(1, myCaptureQueriesListener.countSelectQueries());
myCaptureQueriesListener.logSelectQueries(false, false);
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
runInTransaction(() -> assertEquals(0, mySearchEntityDao.count()));
}
@Test
public void testTypeHistory_CountCacheEnabled_WithOffset() {
create20Patients();
sleepAtLeast(10);
/*
* Perform initial history
*/
myCaptureQueriesListener.clear();
IBundleProvider history = myPatientDao.history(null, new Date(), new SystemRequestDetails());
// No count since there is an offset
assertEquals(null, history.size());
// Simulate the server actually loading the resources
assertEquals(20, history.getResources(0, 999).size());
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(1, myCaptureQueriesListener.countSelectQueries());
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), not(startsWith("select count")));
}
@Test
public void testSystemHistory_CountCacheEnabled() {
create20Patients();
/*
* Perform initial history
*/
myCaptureQueriesListener.clear();
IBundleProvider history = mySystemDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(2, myCaptureQueriesListener.countSelectQueries());
myCaptureQueriesListener.logSelectQueries(false, false);
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), startsWith("select count"));
assertThat(myCaptureQueriesListener.getSelectQueries().get(1).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
runInTransaction(() -> assertEquals(0, mySearchEntityDao.count()));
/*
* Perform history a second time (no count should be performed)
*/
myCaptureQueriesListener.clear();
history = mySystemDao.history(null, null, new SystemRequestDetails());
// Simulate the server requesting the Bundle.total value
assertEquals(20, history.sizeOrThrowNpe());
// Simulate the server actually loading the resources
history.getResources(0, 10);
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
assertEquals(1, myCaptureQueriesListener.countSelectQueries());
myCaptureQueriesListener.logSelectQueries(false, false);
assertThat(myCaptureQueriesListener.getSelectQueries().get(0).getSql(false, false).toLowerCase(Locale.ROOT), containsString(" from hfj_res_ver "));
runInTransaction(() -> assertEquals(0, mySearchEntityDao.count()));
}
@Test
public void testSystemHistory_CountCacheEnabled_Concurrent() throws ExecutionException, InterruptedException {
create20Patients();
myCaptureQueriesListener.clear();
ExecutorService threadPool = Executors.newFixedThreadPool(20);
try {
Runnable task = () -> {
IBundleProvider history = mySystemDao.history(null, null, new SystemRequestDetails());
assertEquals(20, history.sizeOrThrowNpe());
assertEquals(20, history.getResources(0, 999).size());
};
List<Future<?>> futures = new ArrayList<>();
for (int i = 0; i < 20; i++) {
futures.add(threadPool.submit(task));
}
for (Future<?> next : futures) {
next.get();
}
} finally {
threadPool.shutdown();
}
assertEquals(0, myCaptureQueriesListener.countDeleteQueries());
assertEquals(0, myCaptureQueriesListener.countInsertQueries());
assertEquals(0, myCaptureQueriesListener.countUpdateQueries());
// We fetch the history resources 20 times, but should only fetch the
// count(*) once, for a total of 21
assertEquals(20 + 1, myCaptureQueriesListener.countSelectQueries());
}
private void create20Patients() {
BundleBuilder bb = new BundleBuilder(myFhirCtx);
int count = 20;
for (int i = 0; i < count; i++) {
Patient p = new Patient();
p.setActive(true);
bb.addTransactionCreateEntry(p);
}
StopWatch sw = new StopWatch();
mySystemDao.transaction(new SystemRequestDetails(), (Bundle) bb.getBundle());
ourLog.info("Created {} patients in {}", count, sw);
}
}

View File

@ -1,6 +1,7 @@
package ca.uhn.fhirtest.config;
import ca.uhn.fhir.jpa.api.config.DaoConfig;
import ca.uhn.fhir.jpa.api.model.HistoryCountModeEnum;
import ca.uhn.fhir.jpa.config.BaseJavaConfigDstu2;
import ca.uhn.fhir.jpa.model.entity.ModelConfig;
import ca.uhn.fhir.jpa.search.HapiLuceneAnalysisConfigurer;
@ -80,6 +81,7 @@ public class TestDstu2Config extends BaseJavaConfigDstu2 {
retVal.setFilterParameterEnabled(true);
retVal.setDefaultSearchParamsCanBeOverridden(false);
retVal.getModelConfig().setIndexOnContainedResources(true);
// retVal.setHistoryCountMode(HistoryCountModeEnum.COUNT_ACCURATE);
return retVal;
}