diff --git a/hapi-deployable-pom/pom.xml b/hapi-deployable-pom/pom.xml
index d33de296906..2146bb70fd8 100644
--- a/hapi-deployable-pom/pom.xml
+++ b/hapi-deployable-pom/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-fhir
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../pom.xml
diff --git a/hapi-fhir-android/pom.xml b/hapi-fhir-android/pom.xml
index 1e0949da036..6c1b44706b1 100644
--- a/hapi-fhir-android/pom.xml
+++ b/hapi-fhir-android/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-base/pom.xml b/hapi-fhir-base/pom.xml
index 84536f66a91..9a173eed3eb 100644
--- a/hapi-fhir-base/pom.xml
+++ b/hapi-fhir-base/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-base/src/main/java/ca/uhn/fhir/util/TaskChunker.java b/hapi-fhir-base/src/main/java/ca/uhn/fhir/util/TaskChunker.java
index 9514e059740..7b92d0832cf 100644
--- a/hapi-fhir-base/src/main/java/ca/uhn/fhir/util/TaskChunker.java
+++ b/hapi-fhir-base/src/main/java/ca/uhn/fhir/util/TaskChunker.java
@@ -40,13 +40,22 @@ import java.util.stream.Stream;
*/
public class TaskChunker {
- public void chunk(Collection theInput, int theChunkSize, Consumer> theBatchConsumer) {
+ public static void chunk(List theInput, int theChunkSize, Consumer> theBatchConsumer) {
+ if (theInput.size() <= theChunkSize) {
+ theBatchConsumer.accept(theInput);
+ return;
+ }
+ chunk((Collection) theInput, theChunkSize, theBatchConsumer);
+ }
+
+ public static void chunk(Collection theInput, int theChunkSize, Consumer> theBatchConsumer) {
List input;
if (theInput instanceof List) {
input = (List) theInput;
} else {
input = new ArrayList<>(theInput);
}
+
for (int i = 0; i < input.size(); i += theChunkSize) {
int to = i + theChunkSize;
to = Math.min(to, input.size());
@@ -56,12 +65,11 @@ public class TaskChunker {
}
@Nonnull
- public Stream> chunk(Stream theStream, int theChunkSize) {
+ public static Stream> chunk(Stream theStream, int theChunkSize) {
return StreamUtil.partition(theStream, theChunkSize);
}
- @Nonnull
- public void chunk(Iterator theIterator, int theChunkSize, Consumer> theListConsumer) {
+ public static void chunk(Iterator theIterator, int theChunkSize, Consumer> theListConsumer) {
chunk(Streams.stream(theIterator), theChunkSize).forEach(theListConsumer);
}
}
diff --git a/hapi-fhir-base/src/main/resources/ca/uhn/fhir/i18n/hapi-messages.properties b/hapi-fhir-base/src/main/resources/ca/uhn/fhir/i18n/hapi-messages.properties
index f338fc8fa1c..def641589f0 100644
--- a/hapi-fhir-base/src/main/resources/ca/uhn/fhir/i18n/hapi-messages.properties
+++ b/hapi-fhir-base/src/main/resources/ca/uhn/fhir/i18n/hapi-messages.properties
@@ -87,6 +87,7 @@ ca.uhn.fhir.jpa.config.HapiFhirHibernateJpaDialect.resourceVersionConstraintFail
ca.uhn.fhir.jpa.config.HapiFhirHibernateJpaDialect.resourceIndexedCompositeStringUniqueConstraintFailure=The operation has failed with a unique index constraint failure. This probably means that the operation was trying to create/update a resource that would have resulted in a duplicate value for a unique index.
ca.uhn.fhir.jpa.config.HapiFhirHibernateJpaDialect.forcedIdConstraintFailure=The operation has failed with a client-assigned ID constraint failure. This typically means that multiple client threads are trying to create a new resource with the same client-assigned ID at the same time, and this thread was chosen to be rejected. It can also happen when a request disables the Upsert Existence Check.
ca.uhn.fhir.jpa.binary.interceptor.BinaryStorageInterceptor.externalizedBinaryStorageExtensionFoundInRequestBody=Illegal extension found in request payload - URL "{0}" and value "{1}"
+ca.uhn.fhir.jpa.dao.BaseHapiFhirDao.cantUndeleteWithDeletesDisabled=Unable to restore previously deleted resource as deletes are disabled on this server.
ca.uhn.fhir.jpa.dao.BaseHapiFhirDao.incomingNoopInTransaction=Transaction contains resource with operation NOOP. This is only valid as a response operation, not in a request
ca.uhn.fhir.jpa.dao.BaseHapiFhirDao.invalidMatchUrlInvalidResourceType=Invalid match URL "{0}" - Unknown resource type: "{1}"
ca.uhn.fhir.jpa.dao.BaseStorageDao.invalidMatchUrlNoMatches=Invalid match URL "{0}" - No resources match this search
@@ -200,6 +201,7 @@ ca.uhn.fhir.jpa.search.builder.predicate.ResourceLinkPredicateBuilder.invalidTar
ca.uhn.fhir.jpa.search.SearchCoordinatorSvcImpl.invalidResourceType=Invalid/unsupported resource type: "{0}"
ca.uhn.fhir.jpa.dao.index.IdHelperService.nonUniqueForcedId=Non-unique ID specified, can not process request
+ca.uhn.fhir.jpa.dao.index.IdHelperService.deletedId=Resource {0} has been deleted
ca.uhn.fhir.jpa.partition.PartitionLookupSvcImpl.noIdSupplied=No Partition ID supplied
ca.uhn.fhir.jpa.partition.PartitionLookupSvcImpl.missingPartitionIdOrName=Partition must have an ID and a Name
diff --git a/hapi-fhir-base/src/test/java/ca/uhn/fhir/util/TaskChunkerTest.java b/hapi-fhir-base/src/test/java/ca/uhn/fhir/util/TaskChunkerTest.java
index 38df69f017f..50f71fa79f0 100644
--- a/hapi-fhir-base/src/test/java/ca/uhn/fhir/util/TaskChunkerTest.java
+++ b/hapi-fhir-base/src/test/java/ca/uhn/fhir/util/TaskChunkerTest.java
@@ -37,7 +37,7 @@ public class TaskChunkerTest {
List input = newIntRangeList(0, 35);
// Execute
- new TaskChunker().chunk(input, 10, myConsumer);
+ TaskChunker.chunk(input, 10, myConsumer);
// Verify
verify(myConsumer, times(4)).accept(myConsumerCaptor.capture());
diff --git a/hapi-fhir-bom/pom.xml b/hapi-fhir-bom/pom.xml
index d8d738caf99..db25bc4f2ba 100644
--- a/hapi-fhir-bom/pom.xml
+++ b/hapi-fhir-bom/pom.xml
@@ -4,7 +4,7 @@
4.0.0ca.uhn.hapi.fhirhapi-fhir-bom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOTpomHAPI FHIR BOM
@@ -12,7 +12,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-checkstyle/pom.xml b/hapi-fhir-checkstyle/pom.xml
index b4d72e5020f..6edc883df32 100644
--- a/hapi-fhir-checkstyle/pom.xml
+++ b/hapi-fhir-checkstyle/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-fhir
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../pom.xml
diff --git a/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml b/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
index 256dece5d36..d4a3f519428 100644
--- a/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
+++ b/hapi-fhir-cli/hapi-fhir-cli-api/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml b/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
index a0e66e2725c..e402e75a1e3 100644
--- a/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
+++ b/hapi-fhir-cli/hapi-fhir-cli-app/pom.xml
@@ -6,7 +6,7 @@
ca.uhn.hapi.fhirhapi-fhir-cli
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../pom.xml
diff --git a/hapi-fhir-cli/pom.xml b/hapi-fhir-cli/pom.xml
index e32deda674c..1c93cfa5375 100644
--- a/hapi-fhir-cli/pom.xml
+++ b/hapi-fhir-cli/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-fhir
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../pom.xml
diff --git a/hapi-fhir-client-okhttp/pom.xml b/hapi-fhir-client-okhttp/pom.xml
index d299e34c731..eb1663c660b 100644
--- a/hapi-fhir-client-okhttp/pom.xml
+++ b/hapi-fhir-client-okhttp/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-client/pom.xml b/hapi-fhir-client/pom.xml
index 407addfc2ee..c743cad6eb5 100644
--- a/hapi-fhir-client/pom.xml
+++ b/hapi-fhir-client/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-converter/pom.xml b/hapi-fhir-converter/pom.xml
index 63f9fe95ade..16105895f4c 100644
--- a/hapi-fhir-converter/pom.xml
+++ b/hapi-fhir-converter/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-dist/pom.xml b/hapi-fhir-dist/pom.xml
index 31519bd4300..bc99a2f88c7 100644
--- a/hapi-fhir-dist/pom.xml
+++ b/hapi-fhir-dist/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-fhir
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../pom.xml
diff --git a/hapi-fhir-docs/pom.xml b/hapi-fhir-docs/pom.xml
index f8b791bcc5c..27a94cebe28 100644
--- a/hapi-fhir-docs/pom.xml
+++ b/hapi-fhir-docs/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-dont-store-reslink-target-partition-date.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-dont-store-reslink-target-partition-date.yaml
new file mode 100644
index 00000000000..db25abdd722
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-dont-store-reslink-target-partition-date.yaml
@@ -0,0 +1,5 @@
+---
+type: change
+issue: 6460
+title: "The HFJ_RES_LINK table will no longer store the `PARTITION_DATE` value for the indexed link target
+ resource, as this was an unused feature which has been removed as a part of a larger performance optimization."
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-external-refs.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-external-refs.yaml
new file mode 100644
index 00000000000..f67eb29bbf5
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-external-refs.yaml
@@ -0,0 +1,9 @@
+---
+type: perf
+issue: 6460
+title: "The JPA server FHIR transaction processor will now pre-fetch the target
+ resource state for references to resources that don't also appear in the
+ transaction bundle. This means that if you process a large FHIR transaction containing
+ many references to other resources in the repository that are not also being
+ updated in the same transaction, you should see a very significant improvement
+ in performance."
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-ids.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-ids.yaml
new file mode 100644
index 00000000000..11808b898fe
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-improve-transaction-performance-for-ids.yaml
@@ -0,0 +1,7 @@
+---
+type: perf
+issue: 6460
+title: "The JPA server FHIR transaction processor will now more aggressively cache
+ resource IDs for previously seen resources, reducing the number of database reads
+ required when processing transactions. This should provide a noticeable improvement
+ in performance when processing transactions which update pre-existing resources."
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-prevent-undeleting-if-deletes-disabled.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-prevent-undeleting-if-deletes-disabled.yaml
new file mode 100644
index 00000000000..9f9d20cd15f
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-prevent-undeleting-if-deletes-disabled.yaml
@@ -0,0 +1,5 @@
+---
+type: change
+issue: 6460
+title: "If deletes are disabled in the JPA server, it is no longer possible to un-delete
+ a resource (i.e. update a previously deleted resource to make it non-deleted)."
diff --git a/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-version-delete-and-update-in-same-transaction-separately.yaml b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-version-delete-and-update-in-same-transaction-separately.yaml
new file mode 100644
index 00000000000..9fdb5449ff8
--- /dev/null
+++ b/hapi-fhir-docs/src/main/resources/ca/uhn/hapi/fhir/changelog/7_8_0/6460-version-delete-and-update-in-same-transaction-separately.yaml
@@ -0,0 +1,9 @@
+---
+type: change
+issue: 6460
+title: "When performing a FHIR Transaction which deletes and then updates (or otherwise
+ un-deletes) the same resource within a single transaction, the delete was previously
+ not stored as a distinct version (meaning that the resource version was only
+ incremented once, and no delete was actually stored in the resource history). This
+ has been changed so that deletes will always appear as a distinct entry in the
+ resource history."
diff --git a/hapi-fhir-jacoco/pom.xml b/hapi-fhir-jacoco/pom.xml
index 676ceab42aa..1e106aaa69f 100644
--- a/hapi-fhir-jacoco/pom.xml
+++ b/hapi-fhir-jacoco/pom.xml
@@ -11,7 +11,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jaxrsserver-base/pom.xml b/hapi-fhir-jaxrsserver-base/pom.xml
index 1b392dd584a..fe17d290684 100644
--- a/hapi-fhir-jaxrsserver-base/pom.xml
+++ b/hapi-fhir-jaxrsserver-base/pom.xml
@@ -4,7 +4,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpa/pom.xml b/hapi-fhir-jpa/pom.xml
index 6a91cfd67cf..3d91a9e4086 100644
--- a/hapi-fhir-jpa/pom.xml
+++ b/hapi-fhir-jpa/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/config/HapiFhirLocalContainerEntityManagerFactoryBean.java b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/config/HapiFhirLocalContainerEntityManagerFactoryBean.java
index fd38355ecd0..99024e56ec6 100644
--- a/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/config/HapiFhirLocalContainerEntityManagerFactoryBean.java
+++ b/hapi-fhir-jpa/src/main/java/ca/uhn/fhir/jpa/config/HapiFhirLocalContainerEntityManagerFactoryBean.java
@@ -20,7 +20,11 @@
package ca.uhn.fhir.jpa.config;
import com.google.common.base.Strings;
-import org.hibernate.cfg.AvailableSettings;
+import jakarta.annotation.Nonnull;
+import org.hibernate.cfg.BatchSettings;
+import org.hibernate.cfg.JdbcSettings;
+import org.hibernate.cfg.ManagedBeanSettings;
+import org.hibernate.cfg.QuerySettings;
import org.hibernate.query.criteria.ValueHandlingMode;
import org.hibernate.resource.jdbc.spi.PhysicalConnectionHandlingMode;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
@@ -46,18 +50,19 @@ public class HapiFhirLocalContainerEntityManagerFactoryBean extends LocalContain
myConfigurableListableBeanFactory = theConfigurableListableBeanFactory;
}
+ @Nonnull
@Override
public Map getJpaPropertyMap() {
Map retVal = super.getJpaPropertyMap();
// SOMEDAY these defaults can be set in the constructor. setJpaProperties does a merge.
- if (!retVal.containsKey(AvailableSettings.CRITERIA_VALUE_HANDLING_MODE)) {
- retVal.put(AvailableSettings.CRITERIA_VALUE_HANDLING_MODE, ValueHandlingMode.BIND);
+ if (!retVal.containsKey(QuerySettings.CRITERIA_VALUE_HANDLING_MODE)) {
+ retVal.put(QuerySettings.CRITERIA_VALUE_HANDLING_MODE, ValueHandlingMode.BIND);
}
- if (!retVal.containsKey(AvailableSettings.CONNECTION_HANDLING)) {
+ if (!retVal.containsKey(JdbcSettings.CONNECTION_HANDLING)) {
retVal.put(
- AvailableSettings.CONNECTION_HANDLING,
+ JdbcSettings.CONNECTION_HANDLING,
PhysicalConnectionHandlingMode.DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION);
}
@@ -65,26 +70,25 @@ public class HapiFhirLocalContainerEntityManagerFactoryBean extends LocalContain
* Set some performance options
*/
- if (!retVal.containsKey(AvailableSettings.STATEMENT_BATCH_SIZE)) {
- retVal.put(AvailableSettings.STATEMENT_BATCH_SIZE, "30");
+ if (!retVal.containsKey(BatchSettings.STATEMENT_BATCH_SIZE)) {
+ retVal.put(BatchSettings.STATEMENT_BATCH_SIZE, "30");
}
- if (!retVal.containsKey(AvailableSettings.ORDER_INSERTS)) {
- retVal.put(AvailableSettings.ORDER_INSERTS, "true");
+ if (!retVal.containsKey(BatchSettings.ORDER_INSERTS)) {
+ retVal.put(BatchSettings.ORDER_INSERTS, "true");
}
- if (!retVal.containsKey(AvailableSettings.ORDER_UPDATES)) {
- retVal.put(AvailableSettings.ORDER_UPDATES, "true");
+ if (!retVal.containsKey(BatchSettings.ORDER_UPDATES)) {
+ retVal.put(BatchSettings.ORDER_UPDATES, "true");
}
- if (!retVal.containsKey(AvailableSettings.BATCH_VERSIONED_DATA)) {
- retVal.put(AvailableSettings.BATCH_VERSIONED_DATA, "true");
+ if (!retVal.containsKey(BatchSettings.BATCH_VERSIONED_DATA)) {
+ retVal.put(BatchSettings.BATCH_VERSIONED_DATA, "true");
}
// Why is this here, you ask? LocalContainerEntityManagerFactoryBean actually clobbers the setting hibernate
- // needs
- // in order to be able to resolve beans, so we add it back in manually here
- if (!retVal.containsKey(AvailableSettings.BEAN_CONTAINER)) {
- retVal.put(AvailableSettings.BEAN_CONTAINER, new SpringBeanContainer(myConfigurableListableBeanFactory));
+ // needs in order to be able to resolve beans, so we add it back in manually here
+ if (!retVal.containsKey(ManagedBeanSettings.BEAN_CONTAINER)) {
+ retVal.put(ManagedBeanSettings.BEAN_CONTAINER, new SpringBeanContainer(myConfigurableListableBeanFactory));
}
return retVal;
diff --git a/hapi-fhir-jpaserver-base/pom.xml b/hapi-fhir-jpaserver-base/pom.xml
index 5a4d190cc43..b7db9a6a5a5 100644
--- a/hapi-fhir-jpaserver-base/pom.xml
+++ b/hapi-fhir-jpaserver-base/pom.xml
@@ -5,7 +5,7 @@
ca.uhn.hapi.fhirhapi-deployable-pom
- 7.7.6-SNAPSHOT
+ 7.7.7-SNAPSHOT../hapi-deployable-pom/pom.xml
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/bulk/export/svc/JpaBulkExportProcessor.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/bulk/export/svc/JpaBulkExportProcessor.java
index fca9611aed2..17a6833bd12 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/bulk/export/svc/JpaBulkExportProcessor.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/bulk/export/svc/JpaBulkExportProcessor.java
@@ -42,7 +42,6 @@ import ca.uhn.fhir.jpa.model.dao.JpaPid;
import ca.uhn.fhir.jpa.model.search.SearchBuilderLoadIncludesParameters;
import ca.uhn.fhir.jpa.model.search.SearchRuntimeDetails;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
-import ca.uhn.fhir.jpa.util.QueryChunker;
import ca.uhn.fhir.mdm.api.MdmMatchResultEnum;
import ca.uhn.fhir.mdm.dao.IMdmLinkDao;
import ca.uhn.fhir.mdm.model.MdmPidTuple;
@@ -59,6 +58,7 @@ import ca.uhn.fhir.util.ExtensionUtil;
import ca.uhn.fhir.util.HapiExtensions;
import ca.uhn.fhir.util.Logs;
import ca.uhn.fhir.util.SearchParameterUtil;
+import ca.uhn.fhir.util.TaskChunker;
import jakarta.annotation.Nonnull;
import jakarta.persistence.EntityManager;
import org.apache.commons.lang3.StringUtils;
@@ -315,8 +315,7 @@ public class JpaBulkExportProcessor implements IBulkExportProcessor {
// for each patient pid ->
// search for the target resources, with their correct patient references, chunked.
// The results will be jammed into myReadPids
- QueryChunker queryChunker = new QueryChunker<>();
- queryChunker.chunk(expandedMemberResourceIds, QUERY_CHUNK_SIZE, (idChunk) -> {
+ TaskChunker.chunk(expandedMemberResourceIds, QUERY_CHUNK_SIZE, (idChunk) -> {
try {
queryResourceTypeWithReferencesToPatients(pids, idChunk, theParams, theDef);
} catch (IOException ex) {
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirDao.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirDao.java
index 4bc72010bc3..f86ad9310be 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirDao.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirDao.java
@@ -1323,7 +1323,7 @@ public abstract class BaseHapiFhirDao extends BaseStora
* the previous version entity.
*/
if (historyEntry == null) {
- historyEntry = theEntity.toHistory(versionedTags);
+ historyEntry = theEntity.toHistory(versionedTags && theEntity.getDeleted() == null);
}
historyEntry.setEncoding(theChanged.getEncoding());
@@ -1331,7 +1331,7 @@ public abstract class BaseHapiFhirDao extends BaseStora
historyEntry.setResourceTextVc(theChanged.getResourceText());
ourLog.debug("Saving history entry ID[{}] for RES_ID[{}]", historyEntry.getId(), historyEntry.getResourceId());
- myResourceHistoryTableDao.save(historyEntry);
+ myEntityManager.persist(historyEntry);
theEntity.setCurrentVersionEntity(historyEntry);
// Save resource source
@@ -1489,6 +1489,11 @@ public abstract class BaseHapiFhirDao extends BaseStora
wasDeleted = theOldResource.isDeleted();
}
+ if (wasDeleted && !myStorageSettings.isDeleteEnabled()) {
+ String msg = myContext.getLocalizer().getMessage(BaseHapiFhirDao.class, "cantUndeleteWithDeletesDisabled");
+ throw new InvalidRequestException(Msg.code(2573) + msg);
+ }
+
DaoMethodOutcome outcome = toMethodOutcome(
theRequestDetails, savedEntity, theResource, theMatchUrl, theOperationType)
.setCreated(wasDeleted);
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirResourceDao.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirResourceDao.java
index f1d52bbd30b..c7fc08cae7c 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirResourceDao.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirResourceDao.java
@@ -43,6 +43,7 @@ import ca.uhn.fhir.jpa.api.model.ExpungeOptions;
import ca.uhn.fhir.jpa.api.model.ExpungeOutcome;
import ca.uhn.fhir.jpa.api.model.LazyDaoMethodOutcome;
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
+import ca.uhn.fhir.jpa.api.svc.ResolveIdentityMode;
import ca.uhn.fhir.jpa.dao.tx.HapiTransactionService;
import ca.uhn.fhir.jpa.delete.DeleteConflictUtil;
import ca.uhn.fhir.jpa.model.cross.IBasePersistedResource;
@@ -567,8 +568,11 @@ public abstract class BaseHapiFhirResourceDao extends B
// Pre-cache the resource ID
jpaPid.setAssociatedResourceId(entity.getIdType(myFhirContext));
- myIdHelperService.addResolvedPidToForcedId(
- jpaPid, theRequestPartitionId, getResourceName(), entity.getFhirId(), null);
+ String fhirId = entity.getFhirId();
+ if (fhirId == null) {
+ fhirId = Long.toString(entity.getId());
+ }
+ myIdHelperService.addResolvedPidToFhirId(jpaPid, theRequestPartitionId, getResourceName(), fhirId, null);
theTransactionDetails.addResolvedResourceId(jpaPid.getAssociatedResourceId(), jpaPid);
theTransactionDetails.addResolvedResource(jpaPid.getAssociatedResourceId(), theResource);
@@ -1736,8 +1740,13 @@ public abstract class BaseHapiFhirResourceDao extends B
validateResourceTypeAndThrowInvalidRequestException(theId);
BaseHasResource entity;
- JpaPid pid = myIdHelperService.resolveResourcePersistentIds(
- requestPartitionId, getResourceName(), theId.getIdPart());
+ JpaPid pid = myIdHelperService
+ .resolveResourceIdentity(
+ requestPartitionId,
+ getResourceName(),
+ theId.getIdPart(),
+ ResolveIdentityMode.includeDeleted().cacheOk())
+ .getPersistentId();
Set readPartitions = null;
if (requestPartitionId.isAllPartitions()) {
entity = myEntityManager.find(ResourceTable.class, pid.getId());
@@ -1779,10 +1788,6 @@ public abstract class BaseHapiFhirResourceDao extends B
}
}
- if (entity == null) {
- throw new ResourceNotFoundException(Msg.code(1996) + "Resource " + theId + " is not known");
- }
-
if (theId.hasVersionIdPart()) {
if (!theId.isVersionIdPartValidLong()) {
throw new ResourceNotFoundException(Msg.code(978)
@@ -1822,7 +1827,10 @@ public abstract class BaseHapiFhirResourceDao extends B
}
}
- Validate.notNull(entity);
+ if (entity == null) {
+ throw new ResourceNotFoundException(Msg.code(1996) + "Resource " + theId + " is not known");
+ }
+
validateResourceType(entity);
if (theCheckForForcedId) {
@@ -1871,8 +1879,27 @@ public abstract class BaseHapiFhirResourceDao extends B
}
if (persistentId == null) {
- persistentId = myIdHelperService.resolveResourcePersistentIds(
- theRequestPartitionId, getResourceName(), theId.getIdPart());
+ String resourceName = getResourceName();
+ if (myStorageSettings.getResourceClientIdStrategy()
+ == JpaStorageSettings.ClientIdStrategyEnum.ALPHANUMERIC) {
+ if (theId.isIdPartValidLong()) {
+ /*
+ * If it's a pure numeric ID and we are in ALPHANUMERIC mode, then the number
+ * corresponds to a DB PID. In this case we want to resolve it regardless of
+ * which type the client has supplied. This is because DB PIDs are unique across
+ * all resource types (unlike FHIR_IDs which are namespaced to the resource type).
+ * We want to load the resource with that PID regardless of type because if
+ * the user is trying to update it we want to fail if the type is wrong, as
+ * opposed to trying to create a new instance.
+ */
+ resourceName = null;
+ }
+ }
+ persistentId = myIdHelperService.resolveResourceIdentityPid(
+ theRequestPartitionId,
+ resourceName,
+ theId.getIdPart(),
+ ResolveIdentityMode.includeDeleted().cacheOk());
}
ResourceTable entity = myEntityManager.find(ResourceTable.class, persistentId.getId());
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirSystemDao.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirSystemDao.java
index 0aedafefed7..fde57c39836 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirSystemDao.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/BaseHapiFhirSystemDao.java
@@ -192,7 +192,7 @@ public abstract class BaseHapiFhirSystemDao extends B
HapiTransactionService.requireTransaction();
List pids = theResolvedIds.stream().map(t -> ((JpaPid) t).getId()).collect(Collectors.toList());
- new QueryChunker().chunk(pids, idChunk -> {
+ QueryChunker.chunk(pids, idChunk -> {
/*
* Pre-fetch the resources we're touching in this transaction in mass - this reduced the
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/HistoryBuilder.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/HistoryBuilder.java
index 025f7e198b1..8255daf5486 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/HistoryBuilder.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/HistoryBuilder.java
@@ -81,7 +81,7 @@ public class HistoryBuilder {
private FhirContext myCtx;
@Autowired
- private IIdHelperService myIdHelperService;
+ private IIdHelperService myIdHelperService;
/**
* Constructor
@@ -150,13 +150,13 @@ public class HistoryBuilder {
query.setMaxResults(theToIndex - theFromIndex);
List tables = query.getResultList();
- if (tables.size() > 0) {
+ if (!tables.isEmpty()) {
ImmutableListMultimap resourceIdToHistoryEntries =
Multimaps.index(tables, ResourceHistoryTable::getResourceId);
Set pids = resourceIdToHistoryEntries.keySet().stream()
.map(JpaPid::fromId)
.collect(Collectors.toSet());
- PersistentIdToForcedIdMap pidToForcedId = myIdHelperService.translatePidsToForcedIds(pids);
+ PersistentIdToForcedIdMap pidToForcedId = myIdHelperService.translatePidsToForcedIds(pids);
ourLog.trace("Translated IDs: {}", pidToForcedId.getResourcePersistentIdOptionalMap());
for (Long nextResourceId : resourceIdToHistoryEntries.keySet()) {
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoCodeSystem.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoCodeSystem.java
index 270238da3fb..1acd2014769 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoCodeSystem.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoCodeSystem.java
@@ -242,13 +242,15 @@ public class JpaResourceDaoCodeSystem extends BaseHapiF
theTransactionDetails,
theForceUpdate,
theCreateNewHistoryEntry);
- if (!retVal.isUnchangedInCurrentOperation()) {
+ if (thePerformIndexing) {
+ if (!retVal.isUnchangedInCurrentOperation()) {
- org.hl7.fhir.r4.model.CodeSystem cs = myVersionCanonicalizer.codeSystemToCanonical(theResource);
- addPidToResource(theEntity, cs);
+ org.hl7.fhir.r4.model.CodeSystem cs = myVersionCanonicalizer.codeSystemToCanonical(theResource);
+ addPidToResource(theEntity, cs);
- myTerminologyCodeSystemStorageSvc.storeNewCodeSystemVersionIfNeeded(
- cs, (ResourceTable) theEntity, theRequest);
+ myTerminologyCodeSystemStorageSvc.storeNewCodeSystemVersionIfNeeded(
+ cs, (ResourceTable) theEntity, theRequest);
+ }
}
return retVal;
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoObservation.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoObservation.java
index 69088bc9699..eed06fe67da 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoObservation.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoObservation.java
@@ -22,6 +22,8 @@ package ca.uhn.fhir.jpa.dao;
import ca.uhn.fhir.i18n.Msg;
import ca.uhn.fhir.interceptor.model.RequestPartitionId;
import ca.uhn.fhir.jpa.api.dao.IFhirResourceDaoObservation;
+import ca.uhn.fhir.jpa.api.svc.ResolveIdentityMode;
+import ca.uhn.fhir.jpa.model.cross.IResourceLookup;
import ca.uhn.fhir.jpa.model.dao.JpaPid;
import ca.uhn.fhir.jpa.partition.IRequestPartitionHelperSvc;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
@@ -39,12 +41,15 @@ import jakarta.persistence.PersistenceContext;
import jakarta.persistence.PersistenceContextType;
import jakarta.servlet.http.HttpServletResponse;
import org.hl7.fhir.instance.model.api.IBaseResource;
+import org.hl7.fhir.instance.model.api.IIdType;
import org.hl7.fhir.r4.model.Observation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.transaction.support.TransactionTemplate;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.TreeMap;
public class JpaResourceDaoObservation extends BaseHapiFhirResourceDao
@@ -138,13 +143,14 @@ public class JpaResourceDaoObservation extends BaseHapi
patientParams.addAll(theSearchParameterMap.get(getSubjectParamName()));
}
+ Map ids = new HashMap<>();
for (List extends IQueryParameterType> nextPatientList : patientParams) {
for (IQueryParameterType nextOr : nextPatientList) {
if (nextOr instanceof ReferenceParam) {
ReferenceParam ref = (ReferenceParam) nextOr;
- JpaPid pid = myIdHelperService.resolveResourcePersistentIds(
- requestPartitionId, ref.getResourceType(), ref.getIdPart());
- orderedSubjectReferenceMap.put(pid.getId(), nextOr);
+ IIdType id = myFhirContext.getVersion().newIdType();
+ id.setParts(null, ref.getResourceType(), ref.getIdPart(), null);
+ ids.put(id, ref);
} else {
throw new IllegalArgumentException(
Msg.code(942) + "Invalid token type (expecting ReferenceParam): " + nextOr.getClass());
@@ -152,6 +158,15 @@ public class JpaResourceDaoObservation extends BaseHapi
}
}
+ Map> resolvedIds = myIdHelperService.resolveResourceIdentities(
+ requestPartitionId,
+ ids.keySet(),
+ ResolveIdentityMode.includeDeleted().cacheOk());
+ for (Map.Entry entry : ids.entrySet()) {
+ IResourceLookup lookup = resolvedIds.get(entry.getKey());
+ orderedSubjectReferenceMap.put(lookup.getPersistentId().getId(), entry.getValue());
+ }
+
theSearchParameterMap.remove(getSubjectParamName());
theSearchParameterMap.remove(getPatientParamName());
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoValueSet.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoValueSet.java
index 442db1710ad..f70d17849d1 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoValueSet.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/JpaResourceDaoValueSet.java
@@ -303,12 +303,14 @@ public class JpaResourceDaoValueSet extends BaseHapiFhi
theForceUpdate,
theCreateNewHistoryEntry);
- if (getStorageSettings().isPreExpandValueSets() && !retVal.isUnchangedInCurrentOperation()) {
- if (retVal.getDeleted() == null) {
- ValueSet valueSet = myVersionCanonicalizer.valueSetToCanonical(theResource);
- myTerminologySvc.storeTermValueSet(retVal, valueSet);
- } else {
- myTerminologySvc.deleteValueSetAndChildren(retVal);
+ if (thePerformIndexing) {
+ if (getStorageSettings().isPreExpandValueSets() && !retVal.isUnchangedInCurrentOperation()) {
+ if (retVal.getDeleted() == null) {
+ ValueSet valueSet = myVersionCanonicalizer.valueSetToCanonical(theResource);
+ myTerminologySvc.storeTermValueSet(retVal, valueSet);
+ } else {
+ myTerminologySvc.deleteValueSetAndChildren(retVal);
+ }
}
}
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/TransactionProcessor.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/TransactionProcessor.java
index 86d9f727906..6e0280143bb 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/TransactionProcessor.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/TransactionProcessor.java
@@ -26,25 +26,29 @@ import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
import ca.uhn.fhir.jpa.api.dao.IFhirSystemDao;
import ca.uhn.fhir.jpa.api.model.DaoMethodOutcome;
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
+import ca.uhn.fhir.jpa.api.svc.ResolveIdentityMode;
import ca.uhn.fhir.jpa.config.HapiFhirHibernateJpaDialect;
+import ca.uhn.fhir.jpa.model.cross.IResourceLookup;
import ca.uhn.fhir.jpa.model.dao.JpaPid;
import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamToken;
import ca.uhn.fhir.jpa.model.entity.StorageSettings;
import ca.uhn.fhir.jpa.partition.IRequestPartitionHelperSvc;
import ca.uhn.fhir.jpa.searchparam.MatchUrlService;
import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
-import ca.uhn.fhir.jpa.util.QueryChunker;
import ca.uhn.fhir.model.api.IQueryParameterType;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.api.server.storage.TransactionDetails;
import ca.uhn.fhir.rest.param.TokenParam;
+import ca.uhn.fhir.util.FhirTerser;
import ca.uhn.fhir.util.ResourceReferenceInfo;
import ca.uhn.fhir.util.StopWatch;
+import ca.uhn.fhir.util.TaskChunker;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import jakarta.annotation.Nullable;
import jakarta.persistence.EntityManager;
+import jakarta.persistence.FlushModeType;
import jakarta.persistence.PersistenceContext;
import jakarta.persistence.PersistenceContextType;
import jakarta.persistence.PersistenceException;
@@ -67,6 +71,7 @@ import org.springframework.context.ApplicationContext;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
@@ -83,6 +88,7 @@ public class TransactionProcessor extends BaseTransactionProcessor {
public static final Pattern SINGLE_PARAMETER_MATCH_URL_PATTERN = Pattern.compile("^[^?]+[?][a-z0-9-]+=[^&,]+$");
private static final Logger ourLog = LoggerFactory.getLogger(TransactionProcessor.class);
+ public static final int CONDITIONAL_URL_FETCH_CHUNK_SIZE = 100;
@Autowired
private ApplicationContext myApplicationContext;
@@ -146,25 +152,51 @@ public class TransactionProcessor extends BaseTransactionProcessor {
List theEntries,
StopWatch theTransactionStopWatch) {
- ITransactionProcessorVersionAdapter, ?> versionAdapter = getVersionAdapter();
- RequestPartitionId requestPartitionId =
- super.determineRequestPartitionIdForWriteEntries(theRequest, theEntries);
+ /*
+ * We temporarily set the flush mode for the duration of the DB transaction
+ * from the default of AUTO to the temporary value of COMMIT here. We do this
+ * because in AUTO mode, if any SQL SELECTs are required during the
+ * processing of an individual transaction entry, the server will flush the
+ * pending INSERTs/UPDATEs to the database before executing the SELECT.
+ * This hurts performance since we don't get the benefit of batching those
+ * write operations as much as possible. The tradeoff here is that we
+ * could theoretically have transaction operations which try to read
+ * data previously written in the same transaction, and they won't see it.
+ * This shouldn't actually be an issue anyhow - we pre-fetch conditional
+ * URLs and reference targets at the start of the transaction. But this
+ * tradeoff still feels worth it, since the most common use of transactions
+ * is for fast writing of data.
+ *
+ * Note that it's probably not necessary to reset it back, it should
+ * automatically go back to the default value after the transaction but
+ * we reset it just to be safe.
+ */
+ FlushModeType initialFlushMode = myEntityManager.getFlushMode();
+ try {
+ myEntityManager.setFlushMode(FlushModeType.COMMIT);
- if (requestPartitionId != null) {
- preFetch(theTransactionDetails, theEntries, versionAdapter, requestPartitionId);
+ ITransactionProcessorVersionAdapter, ?> versionAdapter = getVersionAdapter();
+ RequestPartitionId requestPartitionId =
+ super.determineRequestPartitionIdForWriteEntries(theRequest, theEntries);
+
+ if (requestPartitionId != null) {
+ preFetch(theTransactionDetails, theEntries, versionAdapter, requestPartitionId);
+ }
+
+ return super.doTransactionWriteOperations(
+ theRequest,
+ theActionName,
+ theTransactionDetails,
+ theAllIds,
+ theIdSubstitutions,
+ theIdToPersistedOutcome,
+ theResponse,
+ theOriginalRequestOrder,
+ theEntries,
+ theTransactionStopWatch);
+ } finally {
+ myEntityManager.setFlushMode(initialFlushMode);
}
-
- return super.doTransactionWriteOperations(
- theRequest,
- theActionName,
- theTransactionDetails,
- theAllIds,
- theIdSubstitutions,
- theIdToPersistedOutcome,
- theResponse,
- theOriginalRequestOrder,
- theEntries,
- theTransactionStopWatch);
}
private void preFetch(
@@ -199,40 +231,100 @@ public class TransactionProcessor extends BaseTransactionProcessor {
RequestPartitionId theRequestPartitionId,
Set foundIds,
List idsToPreFetch) {
- List idsToPreResolve = new ArrayList<>();
+
+ FhirTerser terser = myFhirContext.newTerser();
+
+ // Key: The ID of the resource
+ // Value: TRUE if we should prefetch the existing resource details and all stored indexes,
+ // FALSE if we should prefetch only the identity (resource ID and deleted status)
+ Map idsToPreResolve = new HashMap<>(theEntries.size() * 3);
+
for (IBase nextEntry : theEntries) {
IBaseResource resource = theVersionAdapter.getResource(nextEntry);
if (resource != null) {
String verb = theVersionAdapter.getEntryRequestVerb(myFhirContext, nextEntry);
+
+ /*
+ * Pre-fetch any resources that are potentially being directly updated by ID
+ */
if ("PUT".equals(verb) || "PATCH".equals(verb)) {
String requestUrl = theVersionAdapter.getEntryRequestUrl(nextEntry);
- if (countMatches(requestUrl, '/') == 1 && countMatches(requestUrl, '?') == 0) {
+ if (countMatches(requestUrl, '?') == 0) {
IIdType id = myFhirContext.getVersion().newIdType();
id.setValue(requestUrl);
- idsToPreResolve.add(id);
+ IIdType unqualifiedVersionless = id.toUnqualifiedVersionless();
+ idsToPreResolve.put(unqualifiedVersionless, Boolean.TRUE);
+ }
+ }
+
+ /*
+ * Pre-fetch any resources that are referred to directly by ID (don't replace
+ * the TRUE flag with FALSE in case we're updating a resource but also
+ * pointing to that resource elsewhere in the bundle)
+ */
+ if ("PUT".equals(verb) || "POST".equals(verb)) {
+ for (ResourceReferenceInfo referenceInfo : terser.getAllResourceReferences(resource)) {
+ IIdType reference = referenceInfo.getResourceReference().getReferenceElement();
+ if (reference != null
+ && !reference.isLocal()
+ && !reference.isUuid()
+ && reference.hasResourceType()
+ && reference.hasIdPart()
+ && !reference.getValue().contains("?")) {
+ idsToPreResolve.putIfAbsent(reference.toUnqualifiedVersionless(), Boolean.FALSE);
+ }
}
}
}
}
- List outcome =
- myIdHelperService.resolveResourcePersistentIdsWithCache(theRequestPartitionId, idsToPreResolve).stream()
- .collect(Collectors.toList());
- for (JpaPid next : outcome) {
- foundIds.add(
- next.getAssociatedResourceId().toUnqualifiedVersionless().getValue());
- theTransactionDetails.addResolvedResourceId(next.getAssociatedResourceId(), next);
- if (myStorageSettings.getResourceClientIdStrategy() != JpaStorageSettings.ClientIdStrategyEnum.ANY
- || !next.getAssociatedResourceId().isIdPartValidLong()) {
- idsToPreFetch.add(next.getId());
+
+ /*
+ * If all the entries in the pre-fetch ID map have a value of TRUE, this
+ * means we only have IDs associated with resources we're going to directly
+ * update/patch within the transaction. In that case, it's fine to include
+ * deleted resources, since updating them will bring them back to life.
+ *
+ * If we have any FALSE entries, we're also pre-fetching reference targets
+ * which means we don't want deleted resources, because those are not OK
+ * to reference.
+ */
+ boolean preFetchIncludesReferences = idsToPreResolve.values().stream().anyMatch(t -> !t);
+ ResolveIdentityMode resolveMode = preFetchIncludesReferences
+ ? ResolveIdentityMode.excludeDeleted().noCacheUnlessDeletesDisabled()
+ : ResolveIdentityMode.includeDeleted().cacheOk();
+
+ Map> outcomes = myIdHelperService.resolveResourceIdentities(
+ theRequestPartitionId, idsToPreResolve.keySet(), resolveMode);
+ for (Map.Entry> entry : outcomes.entrySet()) {
+ JpaPid next = (JpaPid) entry.getValue().getPersistentId();
+ IIdType unqualifiedVersionlessId = entry.getKey();
+ foundIds.add(unqualifiedVersionlessId.getValue());
+ theTransactionDetails.addResolvedResourceId(unqualifiedVersionlessId, next);
+ if (idsToPreResolve.get(unqualifiedVersionlessId) == Boolean.TRUE) {
+ if (myStorageSettings.getResourceClientIdStrategy() != JpaStorageSettings.ClientIdStrategyEnum.ANY
+ || (next.getAssociatedResourceId() != null
+ && !next.getAssociatedResourceId().isIdPartValidLong())) {
+ idsToPreFetch.add(next.getId());
+ }
}
}
- for (IIdType next : idsToPreResolve) {
- if (!foundIds.contains(next.toUnqualifiedVersionless().getValue())) {
+
+ // Any IDs that could not be resolved are presumably not there, so
+ // cache that fact so we don't look again later
+ for (IIdType next : idsToPreResolve.keySet()) {
+ if (!foundIds.contains(next.getValue())) {
theTransactionDetails.addResolvedResourceId(next.toUnqualifiedVersionless(), null);
}
}
}
+ @Override
+ protected void handleVerbChangeInTransactionWriteOperations() {
+ super.handleVerbChangeInTransactionWriteOperations();
+
+ myEntityManager.flush();
+ }
+
private void preFetchConditionalUrls(
TransactionDetails theTransactionDetails,
List theEntries,
@@ -274,12 +366,10 @@ public class TransactionProcessor extends BaseTransactionProcessor {
}
}
- new QueryChunker()
- .chunk(
- searchParameterMapsToResolve,
- 100,
- map -> preFetchSearchParameterMaps(
- theTransactionDetails, theRequestPartitionId, map, idsToPreFetch));
+ TaskChunker.chunk(
+ searchParameterMapsToResolve,
+ CONDITIONAL_URL_FETCH_CHUNK_SIZE,
+ map -> preFetchSearchParameterMaps(theTransactionDetails, theRequestPartitionId, map, idsToPreFetch));
}
/**
diff --git a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/index/IdHelperService.java b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/index/IdHelperService.java
index 9e81e87f598..c4542ea8aac 100644
--- a/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/index/IdHelperService.java
+++ b/hapi-fhir-jpaserver-base/src/main/java/ca/uhn/fhir/jpa/dao/index/IdHelperService.java
@@ -25,6 +25,7 @@ import ca.uhn.fhir.interceptor.model.RequestPartitionId;
import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
import ca.uhn.fhir.jpa.api.model.PersistentIdToForcedIdMap;
import ca.uhn.fhir.jpa.api.svc.IIdHelperService;
+import ca.uhn.fhir.jpa.api.svc.ResolveIdentityMode;
import ca.uhn.fhir.jpa.dao.data.IResourceTableDao;
import ca.uhn.fhir.jpa.model.config.PartitionSettings;
import ca.uhn.fhir.jpa.model.cross.IResourceLookup;
@@ -35,11 +36,12 @@ import ca.uhn.fhir.jpa.model.entity.ResourceTable;
import ca.uhn.fhir.jpa.search.builder.SearchBuilder;
import ca.uhn.fhir.jpa.util.MemoryCacheService;
import ca.uhn.fhir.jpa.util.QueryChunker;
-import ca.uhn.fhir.model.primitive.IdDt;
import ca.uhn.fhir.rest.api.server.storage.BaseResourcePersistentId;
import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
import ca.uhn.fhir.rest.server.exceptions.PreconditionFailedException;
+import ca.uhn.fhir.rest.server.exceptions.ResourceGoneException;
import ca.uhn.fhir.rest.server.exceptions.ResourceNotFoundException;
+import ca.uhn.fhir.util.TaskChunker;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.MultimapBuilder;
@@ -60,11 +62,12 @@ import org.hl7.fhir.instance.model.api.IAnyResource;
import org.hl7.fhir.instance.model.api.IBaseResource;
import org.hl7.fhir.instance.model.api.IIdType;
import org.hl7.fhir.r4.model.IdType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.support.TransactionSynchronizationManager;
-import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -79,6 +82,7 @@ import java.util.Set;
import java.util.stream.Collectors;
import static ca.uhn.fhir.jpa.search.builder.predicate.BaseJoiningPredicateBuilder.replaceDefaultPartitionIdIfNonNull;
+import static ca.uhn.fhir.model.primitive.IdDt.isValidLong;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
/**
@@ -102,6 +106,7 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
public class IdHelperService implements IIdHelperService {
public static final Predicate[] EMPTY_PREDICATE_ARRAY = new Predicate[0];
public static final String RESOURCE_PID = "RESOURCE_PID";
+ private static final Logger ourLog = LoggerFactory.getLogger(IdHelperService.class);
@Autowired
protected IResourceTableDao myResourceTableDao;
@@ -128,20 +133,6 @@ public class IdHelperService implements IIdHelperService {
myDontCheckActiveTransactionForUnitTest = theDontCheckActiveTransactionForUnitTest;
}
- /**
- * Given a forced ID, convert it to its Long value. Since you are allowed to use string IDs for resources, we need to
- * convert those to the underlying Long values that are stored, for lookup and comparison purposes.
- *
- * @throws ResourceNotFoundException If the ID can not be found
- */
- @Override
- @Nonnull
- public IResourceLookup resolveResourceIdentity(
- @Nonnull RequestPartitionId theRequestPartitionId, String theResourceType, String theResourceId)
- throws ResourceNotFoundException {
- return resolveResourceIdentity(theRequestPartitionId, theResourceType, theResourceId, false);
- }
-
/**
* Given a forced ID, convert it to its Long value. Since you are allowed to use string IDs for resources, we need to
* convert those to the underlying Long values that are stored, for lookup and comparison purposes.
@@ -153,48 +144,236 @@ public class IdHelperService implements IIdHelperService {
@Nonnull
public IResourceLookup resolveResourceIdentity(
@Nonnull RequestPartitionId theRequestPartitionId,
- String theResourceType,
- final String theResourceId,
- boolean theExcludeDeleted)
+ @Nullable String theResourceType,
+ @Nonnull final String theResourceId,
+ @Nonnull ResolveIdentityMode theMode)
throws ResourceNotFoundException {
- assert myDontCheckActiveTransactionForUnitTest || TransactionSynchronizationManager.isSynchronizationActive()
- : "no transaction active";
- String resourceIdToUse = theResourceId;
- if (resourceIdToUse.contains("/")) {
- resourceIdToUse = theResourceId.substring(resourceIdToUse.indexOf("/") + 1);
+ IIdType id;
+ if (theResourceType != null) {
+ id = newIdType(theResourceType + "/" + theResourceId);
+ } else {
+ id = newIdType(theResourceId);
}
- IdDt id = new IdDt(theResourceType, resourceIdToUse);
- Map>> matches =
- translateForcedIdToPids(theRequestPartitionId, Collections.singletonList(id), theExcludeDeleted);
+ List ids = List.of(id);
+ Map> outcome = resolveResourceIdentities(theRequestPartitionId, ids, theMode);
// We only pass 1 input in so only 0..1 will come back
- if (matches.isEmpty() || !matches.containsKey(resourceIdToUse)) {
+ if (!outcome.containsKey(id)) {
throw new ResourceNotFoundException(Msg.code(2001) + "Resource " + id + " is not known");
}
- if (matches.size() > 1 || matches.get(resourceIdToUse).size() > 1) {
- /*
- * This means that:
- * 1. There are two resources with the exact same resource type and forced id
- * 2. The unique constraint on this column-pair has been dropped
- */
- String msg = myFhirCtx.getLocalizer().getMessage(IdHelperService.class, "nonUniqueForcedId");
- throw new PreconditionFailedException(Msg.code(1099) + msg);
+ return outcome.get(id);
+ }
+
+ @Nonnull
+ @Override
+ public Map> resolveResourceIdentities(
+ @Nonnull RequestPartitionId theRequestPartitionId,
+ Collection theIds,
+ ResolveIdentityMode theMode) {
+ assert myDontCheckActiveTransactionForUnitTest || TransactionSynchronizationManager.isSynchronizationActive()
+ : "no transaction active";
+
+ if (theIds.isEmpty()) {
+ return new HashMap<>();
}
- return matches.get(resourceIdToUse).get(0);
+ Collection ids = new ArrayList<>(theIds);
+ ids.forEach(id -> Validate.isTrue(id.hasIdPart()));
+
+ RequestPartitionId requestPartitionId = replaceDefault(theRequestPartitionId);
+ ListMultimap> idToLookup =
+ MultimapBuilder.hashKeys(theIds.size()).arrayListValues(1).build();
+
+ // Do we have any FHIR ID lookups cached for any of the IDs
+ if (theMode.isUseCache(myStorageSettings.isDeleteEnabled()) && !ids.isEmpty()) {
+ resolveResourceIdentitiesForFhirIdsUsingCache(requestPartitionId, theMode, ids, idToLookup);
+ }
+
+ // For any IDs we still haven't resolved from the cache, look them up in the DB
+ if (!ids.isEmpty()) {
+ resolveResourceIdentitiesForFhirIdsUsingDatabase(requestPartitionId, ids, idToLookup);
+ }
+
+ // Convert the multimap into a simple map
+ Map> retVal = new HashMap<>();
+ for (Map.Entry> next : idToLookup.entries()) {
+ if (next.getValue().getDeleted() != null) {
+ if (theMode.isFailOnDeleted()) {
+ String msg = myFhirCtx
+ .getLocalizer()
+ .getMessageSanitized(
+ IdHelperService.class,
+ "deletedId",
+ next.getKey().getValue());
+ throw new ResourceGoneException(Msg.code(2572) + msg);
+ }
+ if (!theMode.isIncludeDeleted()) {
+ continue;
+ }
+ }
+
+ IResourceLookup previousValue = retVal.put(next.getKey(), next.getValue());
+ if (previousValue != null) {
+ /*
+ * This means that either:
+ * 1. There are two resources with the exact same resource type and forced
+ * id. The most likely reason for that is that someone is performing a
+ * multi-partition search and there are resources on each partition
+ * with the same ID.
+ * 2. The unique constraint on the FHIR_ID column has been dropped
+ */
+ ourLog.warn(
+ "Resource ID[{}] corresponds to lookups: {} and {}",
+ next.getKey(),
+ previousValue,
+ next.getValue());
+ String msg = myFhirCtx.getLocalizer().getMessage(IdHelperService.class, "nonUniqueForcedId");
+ throw new PreconditionFailedException(Msg.code(1099) + msg);
+ }
+ }
+
+ return retVal;
}
/**
- * Returns a mapping of Id -> IResourcePersistentId.
- * If any resource is not found, it will throw ResourceNotFound exception (and no map will be returned)
+ * Fetch the resource identity ({@link IResourceLookup}) for a collection of
+ * resource IDs from the internal memory cache if possible. Note that we only
+ * use cached results if deletes are disabled on the server (since it is
+ * therefore not possible that we have an entry in the cache that has since
+ * been deleted but the cache doesn't know about the deletion), or if we
+ * aren't excluding deleted results anyhow.
+ *
+ * @param theRequestPartitionId The partition(s) to search
+ * @param theIdsToResolve The IDs we should look up. Any IDs that are resolved
+ * will be removed from this list. Any IDs remaining in
+ * the list after calling this method still haven't
+ * been attempted to be resolved.
+ * @param theMapToPopulate The results will be populated into this map
*/
- @Override
- @Nonnull
- public Map resolveResourcePersistentIds(
- @Nonnull RequestPartitionId theRequestPartitionId, String theResourceType, List theIds) {
- return resolveResourcePersistentIds(theRequestPartitionId, theResourceType, theIds, false);
+ private void resolveResourceIdentitiesForFhirIdsUsingCache(
+ @Nonnull RequestPartitionId theRequestPartitionId,
+ ResolveIdentityMode theMode,
+ Collection theIdsToResolve,
+ ListMultimap> theMapToPopulate) {
+ for (Iterator idIterator = theIdsToResolve.iterator(); idIterator.hasNext(); ) {
+ IIdType nextForcedId = idIterator.next();
+ MemoryCacheService.ForcedIdCacheKey nextKey = new MemoryCacheService.ForcedIdCacheKey(
+ nextForcedId.getResourceType(), nextForcedId.getIdPart(), theRequestPartitionId);
+ if (theMode.isUseCache(myStorageSettings.isDeleteEnabled())) {
+ List> cachedLookups = myMemoryCacheService.getIfPresent(
+ MemoryCacheService.CacheEnum.RESOURCE_LOOKUP_BY_FORCED_ID, nextKey);
+ if (cachedLookups != null && !cachedLookups.isEmpty()) {
+ idIterator.remove();
+ for (IResourceLookup cachedLookup : cachedLookups) {
+ if (theMode.isIncludeDeleted() || cachedLookup.getDeleted() == null) {
+ theMapToPopulate.put(nextKey.toIdType(myFhirCtx), cachedLookup);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Fetch the resource identity ({@link IResourceLookup}) for a collection of
+ * resource IDs from the database
+ *
+ * @param theRequestPartitionId The partition(s) to search
+ * @param theIdsToResolve The IDs we should look up
+ * @param theMapToPopulate The results will be populated into this map
+ */
+ private void resolveResourceIdentitiesForFhirIdsUsingDatabase(
+ RequestPartitionId theRequestPartitionId,
+ Collection theIdsToResolve,
+ ListMultimap> theMapToPopulate) {
+
+ /*
+ * If we have more than a threshold of IDs, we need to chunk the execution to
+ * avoid having too many parameters in one SQL statement
+ */
+ int maxPageSize = (SearchBuilder.getMaximumPageSize() / 2) - 10;
+ if (theIdsToResolve.size() > maxPageSize) {
+ TaskChunker.chunk(
+ theIdsToResolve,
+ maxPageSize,
+ chunk -> resolveResourceIdentitiesForFhirIdsUsingDatabase(
+ theRequestPartitionId, chunk, theMapToPopulate));
+ return;
+ }
+
+ CriteriaBuilder cb = myEntityManager.getCriteriaBuilder();
+ CriteriaQuery criteriaQuery = cb.createTupleQuery();
+ Root from = criteriaQuery.from(ResourceTable.class);
+ criteriaQuery.multiselect(
+ from.get("myId"),
+ from.get("myResourceType"),
+ from.get("myFhirId"),
+ from.get("myDeleted"),
+ from.get("myPartitionIdValue"));
+
+ List outerAndPredicates = new ArrayList<>(2);
+ if (!theRequestPartitionId.isAllPartitions()) {
+ getOptionalPartitionPredicate(theRequestPartitionId, cb, from).ifPresent(outerAndPredicates::add);
+ }
+
+ // Create one predicate clause per ID.
+ List innerIdPredicates = new ArrayList<>(theIdsToResolve.size());
+ boolean haveUntypedIds = false;
+ for (IIdType next : theIdsToResolve) {
+ if (!next.hasResourceType()) {
+ haveUntypedIds = true;
+ }
+
+ List idPredicates = new ArrayList<>(2);
+
+ if (myStorageSettings.getResourceClientIdStrategy() == JpaStorageSettings.ClientIdStrategyEnum.ALPHANUMERIC
+ && next.isIdPartValidLong()) {
+ Predicate typeCriteria = cb.equal(from.get("myId"), next.getIdPartAsLong());
+ idPredicates.add(typeCriteria);
+ } else {
+ if (isNotBlank(next.getResourceType())) {
+ Predicate typeCriteria = cb.equal(from.get("myResourceType"), next.getResourceType());
+ idPredicates.add(typeCriteria);
+ }
+ Predicate idCriteria = cb.equal(from.get("myFhirId"), next.getIdPart());
+ idPredicates.add(idCriteria);
+ }
+
+ innerIdPredicates.add(cb.and(idPredicates.toArray(EMPTY_PREDICATE_ARRAY)));
+ }
+ outerAndPredicates.add(cb.or(innerIdPredicates.toArray(EMPTY_PREDICATE_ARRAY)));
+
+ criteriaQuery.where(cb.and(outerAndPredicates.toArray(EMPTY_PREDICATE_ARRAY)));
+ TypedQuery query = myEntityManager.createQuery(criteriaQuery);
+ List results = query.getResultList();
+ for (Tuple nextId : results) {
+ // Check if the nextId has a resource ID. It may have a null resource ID if a commit is still pending.
+ Long resourcePid = nextId.get(0, Long.class);
+ String resourceType = nextId.get(1, String.class);
+ String fhirId = nextId.get(2, String.class);
+ Date deletedAd = nextId.get(3, Date.class);
+ Integer partitionId = nextId.get(4, Integer.class);
+ if (resourcePid != null) {
+ JpaResourceLookup lookup = new JpaResourceLookup(
+ resourceType, resourcePid, deletedAd, PartitionablePartitionId.with(partitionId, null));
+
+ MemoryCacheService.ForcedIdCacheKey nextKey =
+ new MemoryCacheService.ForcedIdCacheKey(resourceType, fhirId, theRequestPartitionId);
+ IIdType id = nextKey.toIdType(myFhirCtx);
+ theMapToPopulate.put(id, lookup);
+
+ if (haveUntypedIds) {
+ id = nextKey.toIdTypeWithoutResourceType(myFhirCtx);
+ theMapToPopulate.put(id, lookup);
+ }
+
+ List> valueToCache = theMapToPopulate.get(id);
+ myMemoryCacheService.putAfterCommit(
+ MemoryCacheService.CacheEnum.RESOURCE_LOOKUP_BY_FORCED_ID, nextKey, valueToCache);
+ }
+ }
}
/**
@@ -208,7 +387,7 @@ public class IdHelperService implements IIdHelperService {
@Nonnull RequestPartitionId theRequestPartitionId,
String theResourceType,
List theIds,
- boolean theExcludeDeleted) {
+ ResolveIdentityMode theMode) {
assert myDontCheckActiveTransactionForUnitTest || TransactionSynchronizationManager.isSynchronizationActive();
Validate.notNull(theIds, "theIds cannot be null");
Validate.isTrue(!theIds.isEmpty(), "theIds must not be empty");
@@ -224,7 +403,7 @@ public class IdHelperService implements IIdHelperService {
// is a forced id
// we must resolve!
if (myStorageSettings.isDeleteEnabled()) {
- retVal = resolveResourceIdentity(theRequestPartitionId, theResourceType, id, theExcludeDeleted)
+ retVal = resolveResourceIdentity(theRequestPartitionId, theResourceType, id, theMode)
.getPersistentId();
retVals.put(id, retVal);
} else {
@@ -249,18 +428,6 @@ public class IdHelperService implements IIdHelperService {
return retVals;
}
- /**
- * Given a resource type and ID, determines the internal persistent ID for the resource.
- *
- * @throws ResourceNotFoundException If the ID can not be found
- */
- @Override
- @Nonnull
- public JpaPid resolveResourcePersistentIds(
- @Nonnull RequestPartitionId theRequestPartitionId, String theResourceType, String theId) {
- return resolveResourcePersistentIds(theRequestPartitionId, theResourceType, theId, false);
- }
-
/**
* Given a resource type and ID, determines the internal persistent ID for the resource.
* Optionally filters out deleted resources.
@@ -273,11 +440,11 @@ public class IdHelperService implements IIdHelperService {
@Nonnull RequestPartitionId theRequestPartitionId,
String theResourceType,
String theId,
- boolean theExcludeDeleted) {
+ ResolveIdentityMode theMode) {
Validate.notNull(theId, "theId must not be null");
Map retVal = resolveResourcePersistentIds(
- theRequestPartitionId, theResourceType, Collections.singletonList(theId), theExcludeDeleted);
+ theRequestPartitionId, theResourceType, Collections.singletonList(theId), theMode);
return retVal.get(theId); // should be only one
}
@@ -359,11 +526,11 @@ public class IdHelperService implements IIdHelperService {
idsToCheck.add(nextId);
}
- new QueryChunker()
- .chunk(
- idsToCheck,
- SearchBuilder.getMaximumPageSize() / 2,
- ids -> doResolvePersistentIds(theRequestPartitionId, ids, retVal));
+ new QueryChunker();
+ TaskChunker.chunk(
+ idsToCheck,
+ SearchBuilder.getMaximumPageSize() / 2,
+ ids -> doResolvePersistentIds(theRequestPartitionId, ids, retVal));
}
return retVal;
@@ -430,18 +597,30 @@ public class IdHelperService implements IIdHelperService {
RequestPartitionId theRequestPartitionId, CriteriaBuilder cb, Root from) {
if (myPartitionSettings.isAllowUnqualifiedCrossPartitionReference()) {
return Optional.empty();
- } else if (theRequestPartitionId.isDefaultPartition() && myPartitionSettings.getDefaultPartitionId() == null) {
- Predicate partitionIdCriteria = cb.isNull(from.get("myPartitionIdValue"));
- return Optional.of(partitionIdCriteria);
- } else if (!theRequestPartitionId.isAllPartitions()) {
+ } else if (theRequestPartitionId.isAllPartitions()) {
+ return Optional.empty();
+ } else {
List partitionIds = theRequestPartitionId.getPartitionIds();
partitionIds = replaceDefaultPartitionIdIfNonNull(myPartitionSettings, partitionIds);
- if (partitionIds.size() > 1) {
- Predicate partitionIdCriteria = from.get("myPartitionIdValue").in(partitionIds);
- return Optional.of(partitionIdCriteria);
- } else if (partitionIds.size() == 1) {
- Predicate partitionIdCriteria = cb.equal(from.get("myPartitionIdValue"), partitionIds.get(0));
- return Optional.of(partitionIdCriteria);
+ if (partitionIds.contains(null)) {
+ Predicate partitionIdNullCriteria =
+ from.get("myPartitionIdValue").isNull();
+ if (partitionIds.size() == 1) {
+ return Optional.of(partitionIdNullCriteria);
+ } else {
+ Predicate partitionIdCriteria = from.get("myPartitionIdValue")
+ .in(partitionIds.stream().filter(t -> t != null).collect(Collectors.toList()));
+ return Optional.of(cb.or(partitionIdCriteria, partitionIdNullCriteria));
+ }
+ } else {
+ if (partitionIds.size() > 1) {
+ Predicate partitionIdCriteria =
+ from.get("myPartitionIdValue").in(partitionIds);
+ return Optional.of(partitionIdCriteria);
+ } else if (partitionIds.size() == 1) {
+ Predicate partitionIdCriteria = cb.equal(from.get("myPartitionIdValue"), partitionIds.get(0));
+ return Optional.of(partitionIdCriteria);
+ }
}
}
return Optional.empty();
@@ -475,6 +654,7 @@ public class IdHelperService implements IIdHelperService {
return retVal;
}
+ @SuppressWarnings("OptionalAssignedToNull")
@Override
public Optional translatePidIdToForcedIdWithCache(JpaPid theId) {
// do getIfPresent and then put to avoid doing I/O inside the cache.
@@ -492,112 +672,6 @@ public class IdHelperService implements IIdHelperService {
return forcedId;
}
- private ListMultimap organizeIdsByResourceType(Collection theIds) {
- ListMultimap typeToIds =
- MultimapBuilder.hashKeys().arrayListValues().build();
- for (IIdType nextId : theIds) {
- if (myStorageSettings.getResourceClientIdStrategy() == JpaStorageSettings.ClientIdStrategyEnum.ANY
- || !isValidPid(nextId)) {
- if (nextId.hasResourceType()) {
- typeToIds.put(nextId.getResourceType(), nextId.getIdPart());
- } else {
- typeToIds.put("", nextId.getIdPart());
- }
- }
- }
- return typeToIds;
- }
-
- private Map>> translateForcedIdToPids(
- @Nonnull RequestPartitionId theRequestPartitionId, Collection theId, boolean theExcludeDeleted) {
- theId.forEach(id -> Validate.isTrue(id.hasIdPart()));
-
- if (theId.isEmpty()) {
- return new HashMap<>();
- }
-
- Map>> retVal = new HashMap<>();
- RequestPartitionId requestPartitionId = replaceDefault(theRequestPartitionId);
-
- if (myStorageSettings.getResourceClientIdStrategy() != JpaStorageSettings.ClientIdStrategyEnum.ANY) {
- List pids = theId.stream()
- .filter(t -> isValidPid(t))
- .map(IIdType::getIdPartAsLong)
- .collect(Collectors.toList());
- if (!pids.isEmpty()) {
- resolvePids(requestPartitionId, pids, retVal);
- }
- }
-
- // returns a map of resourcetype->id
- ListMultimap typeToIds = organizeIdsByResourceType(theId);
- for (Map.Entry> nextEntry : typeToIds.asMap().entrySet()) {
- String nextResourceType = nextEntry.getKey();
- Collection nextIds = nextEntry.getValue();
-
- if (!myStorageSettings.isDeleteEnabled()) {
- for (Iterator forcedIdIterator = nextIds.iterator(); forcedIdIterator.hasNext(); ) {
- String nextForcedId = forcedIdIterator.next();
- String nextKey = nextResourceType + "/" + nextForcedId;
- IResourceLookup cachedLookup =
- myMemoryCacheService.getIfPresent(MemoryCacheService.CacheEnum.RESOURCE_LOOKUP, nextKey);
- if (cachedLookup != null) {
- forcedIdIterator.remove();
- retVal.computeIfAbsent(nextForcedId, id -> new ArrayList<>())
- .add(cachedLookup);
- }
- }
- }
-
- if (!nextIds.isEmpty()) {
- Collection