5621 deadlock on caffeine cache when creating a resource with conditional create (#5622)
* Modifying the CacheProvider to avoid doing db I/O within the cache miss value supplier callback.
* Setting the initial capacity of instantiated caches to the cache max size to avoid resizing during operations.
* Adding changelog and spotless.
* Fixing typo.
* Addressing comments from code review.

---------

Co-authored-by: peartree <etienne.poirier@smilecdr.com>
parent b982abbfbe
commit 47867fc628
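A minimal sketch of the first bullet above, using the Caffeine API directly rather than HAPI's MemoryCacheService (the class name and the findInDatabase() helper are hypothetical): computing the value inside Caffeine's per-key mapping function holds the hash-bin lock while the database call runs, whereas an explicit getIfPresent/put pair keeps the I/O outside the cache at the cost of an occasional duplicate lookup.

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.util.Optional;

public class GetThenPutSketch {

	private final Cache<Long, Optional<String>> cache =
			Caffeine.newBuilder().maximumSize(10_000).build();

	// Risky pattern: the mapping function runs while Caffeine holds the lock on the
	// key's hash bin, so blocking DB I/O here stalls other writers to that bin and,
	// if the I/O re-enters the cache, can deadlock.
	public Optional<String> lookupInsideCache(Long pid) {
		return cache.get(pid, this::findInDatabase);
	}

	// Fixed pattern: do the I/O outside the cache, then publish the result.
	// A racing miss may hit the DB twice, but no cache lock is held during I/O.
	public Optional<String> lookupThenPut(Long pid) {
		Optional<String> value = cache.getIfPresent(pid);
		if (value == null) {
			value = findInDatabase(pid);
			cache.put(pid, value);
		}
		return value;
	}

	private Optional<String> findInDatabase(Long pid) {
		// Placeholder for the real repository lookup.
		return Optional.of("Patient/" + pid);
	}
}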
@@ -0,0 +1,4 @@
+---
+type: fix
+issue: 5621
+title: "Fixed a deadlock in resource conditional create."
@@ -477,10 +477,19 @@ public class IdHelperService implements IIdHelperService<JpaPid> {
 
 	@Override
 	public Optional<String> translatePidIdToForcedIdWithCache(JpaPid theId) {
-		return myMemoryCacheService.get(
-				MemoryCacheService.CacheEnum.PID_TO_FORCED_ID,
-				theId.getId(),
-				pid -> myResourceTableDao.findById(pid).map(ResourceTable::asTypedFhirResourceId));
+		// do getIfPresent and then put to avoid doing I/O inside the cache.
+		Optional<String> forcedId =
+				myMemoryCacheService.getIfPresent(MemoryCacheService.CacheEnum.PID_TO_FORCED_ID, theId.getId());
+
+		if (forcedId == null) {
+			// This is only called when we know the resource exists.
+			// So this optional is only empty when there is no hfj_forced_id table
+			// note: this is obsolete with the new fhir_id column, and will go away.
+			forcedId = myResourceTableDao.findById(theId.getId()).map(ResourceTable::asTypedFhirResourceId);
+			myMemoryCacheService.put(MemoryCacheService.CacheEnum.PID_TO_FORCED_ID, theId.getId(), forcedId);
+		}
+
+		return forcedId;
 	}
 
 	private ListMultimap<String, String> organizeIdsByResourceType(Collection<IIdType> theIds) {
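One detail in the new code above: getIfPresent() returning null means the key was never looked up, while a cached Optional.empty() records that the lookup already happened and no forced id exists, so the negative result is served without another DB round trip. A tiny sketch of that distinction, again against Caffeine directly (illustrative only, not HAPI code):

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.util.Optional;

public class NullVersusEmptySketch {
	public static void main(String[] args) {
		Cache<Long, Optional<String>> cache =
				Caffeine.newBuilder().maximumSize(1_000).build();

		// Cache a negative lookup result for pid 42.
		cache.put(42L, Optional.empty());

		System.out.println(cache.getIfPresent(42L)); // Optional.empty -> cached "no forced id"
		System.out.println(cache.getIfPresent(43L)); // null -> genuine miss, go to the DB
	}
}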
@@ -44,6 +44,9 @@ public class CacheProvider<K, V> implements ca.uhn.fhir.sl.cache.CacheProvider<K
 	public Cache<K, V> create(long timeoutMillis, long maximumSize) {
 		return new CacheDelegator<K, V>(Caffeine.newBuilder()
 				.expireAfterWrite(timeoutMillis, TimeUnit.MILLISECONDS)
+				// Caffeine locks the whole array when growing the hash table.
+				// Set initial capacity to max to avoid this. All our caches are <1M entries.
+				.initialCapacity((int) maximumSize)
 				.maximumSize(maximumSize)
 				.build());
 	}
@@ -51,6 +54,9 @@ public class CacheProvider<K, V> implements ca.uhn.fhir.sl.cache.CacheProvider<K
 	public LoadingCache<K, V> create(long timeoutMillis, long maximumSize, CacheLoader<K, V> loading) {
 		return new LoadingCacheDelegator<K, V>(Caffeine.newBuilder()
 				.expireAfterWrite(timeoutMillis, TimeUnit.MILLISECONDS)
+				// Caffeine locks the whole array when growing the hash table.
+				// Set initial capacity to max to avoid this. All our caches are <1M entries.
+				.initialCapacity((int) maximumSize)
 				.maximumSize(maximumSize)
 				.build(loading::load));
 	}
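The initialCapacity change in both hunks pre-sizes the backing hash table to the maximum size, so Caffeine never has to grow (and lock) the table under load; the int cast rests on the stated assumption that every cache stays far below Integer.MAX_VALUE entries. A standalone sketch of the same builder pattern (the presized() helper name and the values are made up for illustration):

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

import java.util.concurrent.TimeUnit;

public class PresizedCacheSketch {

	// Build a cache whose backing table is sized up front, so it never resizes.
	static <K, V> Cache<K, V> presized(long timeoutMillis, long maximumSize) {
		return Caffeine.newBuilder()
				.expireAfterWrite(timeoutMillis, TimeUnit.MILLISECONDS)
				.initialCapacity((int) maximumSize)
				.maximumSize(maximumSize)
				.build();
	}

	public static void main(String[] args) {
		Cache<String, String> cache = presized(TimeUnit.MINUTES.toMillis(10), 100_000);
		cache.put("Patient/123", "forced-id-example");
		System.out.println(cache.getIfPresent("Patient/123"));
	}
}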