Compare commits
3 Commits
7863f03c68
...
db581dd158
Author | SHA1 | Date |
---|---|---|
Tadgh | db581dd158 | |
Chris0296 | 1f7b605a18 | |
Emre Dincturk | 77da1deeda |
|
@ -0,0 +1,31 @@
|
|||
package ca.uhn.fhir.util;
|
||||
|
||||
/**
|
||||
* A utility class for thread sleeps.
|
||||
* Uses non-static methods for easier mocking and unnecessary waits in unit tests
|
||||
*/
|
||||
public class SleepUtil {
|
||||
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(SleepUtil.class);
|
||||
|
||||
public void sleepAtLeast(long theMillis) {
|
||||
sleepAtLeast(theMillis, true);
|
||||
}
|
||||
|
||||
@SuppressWarnings("BusyWait")
|
||||
public void sleepAtLeast(long theMillis, boolean theLogProgress) {
|
||||
long start = System.currentTimeMillis();
|
||||
while (System.currentTimeMillis() <= start + theMillis) {
|
||||
try {
|
||||
long timeSinceStarted = System.currentTimeMillis() - start;
|
||||
long timeToSleep = Math.max(0, theMillis - timeSinceStarted);
|
||||
if (theLogProgress) {
|
||||
ourLog.info("Sleeping for {}ms", timeToSleep);
|
||||
}
|
||||
Thread.sleep(timeToSleep);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
ourLog.error("Interrupted", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -31,6 +31,9 @@ import static org.apache.commons.lang3.StringUtils.defaultString;
|
|||
|
||||
public class TestUtil {
|
||||
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(TestUtil.class);
|
||||
|
||||
private static SleepUtil ourSleepUtil = new SleepUtil();
|
||||
|
||||
private static boolean ourShouldRandomizeTimezones = true;
|
||||
|
||||
public static void setShouldRandomizeTimezones(boolean theShouldRandomizeTimezones) {
|
||||
|
@ -135,25 +138,22 @@ public class TestUtil {
|
|||
return stripReturns(theString).replace(" ", "");
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* In production code, instead of this static method, it is better to use an instance of SleepUtil.
|
||||
* Since SleepUtil isn't using static methods, it is easier to mock for unit test and avoid unnecessary waits in
|
||||
* unit tests
|
||||
*/
|
||||
public static void sleepAtLeast(long theMillis) {
|
||||
sleepAtLeast(theMillis, true);
|
||||
ourSleepUtil.sleepAtLeast(theMillis);
|
||||
}
|
||||
|
||||
@SuppressWarnings("BusyWait")
|
||||
/**
|
||||
* In production code, instead of this static method, it is better to use an instance of SleepUtil.
|
||||
* Since SleepUtil isn't using static methods, it is easier to mock for unit test and avoid unnecessary waits in
|
||||
* unit tests
|
||||
*/
|
||||
public static void sleepAtLeast(long theMillis, boolean theLogProgress) {
|
||||
long start = System.currentTimeMillis();
|
||||
while (System.currentTimeMillis() <= start + theMillis) {
|
||||
try {
|
||||
long timeSinceStarted = System.currentTimeMillis() - start;
|
||||
long timeToSleep = Math.max(0, theMillis - timeSinceStarted);
|
||||
if (theLogProgress) {
|
||||
ourLog.info("Sleeping for {}ms", timeToSleep);
|
||||
}
|
||||
Thread.sleep(timeToSleep);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
ourLog.error("Interrupted", e);
|
||||
}
|
||||
}
|
||||
ourSleepUtil.sleepAtLeast(theMillis, theLogProgress);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
package ca.uhn.fhir.util;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.*;
|
||||
|
||||
class SleepUtilTest {
|
||||
|
||||
@Test
|
||||
public void testSleepAtLeast() {
|
||||
SleepUtil sleepUtil = new SleepUtil();
|
||||
long amountToSleepMs = 10;
|
||||
|
||||
long start = System.currentTimeMillis();
|
||||
sleepUtil.sleepAtLeast(amountToSleepMs);
|
||||
long stop = System.currentTimeMillis();
|
||||
|
||||
long actualSleepDurationMs = stop - start;
|
||||
assertTrue(actualSleepDurationMs >= amountToSleepMs);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testZeroMs() {
|
||||
// 0 is a valid input
|
||||
SleepUtil sleepUtil = new SleepUtil();
|
||||
sleepUtil.sleepAtLeast(0);
|
||||
}
|
||||
|
||||
}
|
|
@ -5,6 +5,9 @@ import ca.uhn.fhir.jpa.migrate.JdbcUtils;
|
|||
import ca.uhn.fhir.jpa.migrate.SchemaMigrator;
|
||||
import ca.uhn.fhir.jpa.migrate.dao.HapiMigrationDao;
|
||||
import ca.uhn.fhir.jpa.migrate.entity.HapiMigrationEntity;
|
||||
import ca.uhn.fhir.jpa.migrate.SchemaMigrator;
|
||||
import ca.uhn.fhir.jpa.migrate.dao.HapiMigrationDao;
|
||||
import ca.uhn.fhir.jpa.migrate.entity.HapiMigrationEntity;
|
||||
import ca.uhn.fhir.jpa.util.RandomTextUtils;
|
||||
import ca.uhn.fhir.system.HapiSystemProperties;
|
||||
import com.google.common.base.Charsets;
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
### Major Database Change
|
||||
|
||||
This release contains a migration that covers every resource.
|
||||
This may take several minutes on a larger system (e.g. 10 minutes for 100 million resources).
|
||||
For zero-downtime, or for larger systems, we recommend you upgrade the schema using the CLI tools.
|
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
release-date: "2023-08-31"
|
||||
codename: "Zed"
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
type: fix
|
||||
issue: 4634
|
||||
title: "Previously, rule builder could not effectively handle Patient Type-Level Exports. It would over-permit requests
|
||||
in certain scenarios. This fix allows for accumulation of ids on a Patient Type-Level Bulk export to enable us to
|
||||
properly match the requested Patient IDs against the users permitted Patient IDs."
|
|
@ -2,5 +2,6 @@
|
|||
type: fix
|
||||
issue: 5486
|
||||
jira: SMILE-7457
|
||||
backport: 6.10.1
|
||||
title: "Previously, testing database migration with cli migrate-database command in dry-run mode would insert in the
|
||||
migration task table. The issue has been fixed."
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
type: fix
|
||||
issue: 5511
|
||||
backport: 6.10.1
|
||||
title: "Previously, when creating an index as a part of a migration, if the index already existed with a different name
|
||||
on Oracle, the migration would fail. This has been fixed so that the create index migration task now recovers with
|
||||
a warning message if the index already exists with a different name."
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
type: fix
|
||||
issue: 5546
|
||||
backport: 6.10.1
|
||||
title: "A database migration added trailing spaces to server-assigned resource ids.
|
||||
This fix removes the bad migration, and adds another migration to fix the errors."
|
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
type: fix
|
||||
issue: 5553
|
||||
title: "mdm-clear jobs are prone to failing because of deadlocks when running on SQL Server. Such job failures have
|
||||
been mitigated to some extent by increasing the retries on deadlocks."
|
|
@ -29,6 +29,7 @@ import ca.uhn.fhir.jpa.migrate.taskdef.CalculateHashesTask;
|
|||
import ca.uhn.fhir.jpa.migrate.taskdef.CalculateOrdinalDatesTask;
|
||||
import ca.uhn.fhir.jpa.migrate.taskdef.ColumnTypeEnum;
|
||||
import ca.uhn.fhir.jpa.migrate.taskdef.ForceIdMigrationCopyTask;
|
||||
import ca.uhn.fhir.jpa.migrate.taskdef.ForceIdMigrationFixTask;
|
||||
import ca.uhn.fhir.jpa.migrate.tasks.api.BaseMigrationTasks;
|
||||
import ca.uhn.fhir.jpa.migrate.tasks.api.Builder;
|
||||
import ca.uhn.fhir.jpa.model.config.PartitionSettings;
|
||||
|
@ -140,10 +141,19 @@ public class HapiFhirJpaMigrationTasks extends BaseMigrationTasks<VersionEnum> {
|
|||
|
||||
// Move forced_id constraints to hfj_resource and the new fhir_id column
|
||||
// Note: we leave the HFJ_FORCED_ID.IDX_FORCEDID_TYPE_FID index in place to support old writers for a while.
|
||||
version.addTask(new ForceIdMigrationCopyTask(version.getRelease(), "20231018.1"));
|
||||
version.addTask(new ForceIdMigrationCopyTask(version.getRelease(), "20231018.1").setDoNothing(true));
|
||||
|
||||
Builder.BuilderWithTableName hfjResource = version.onTable("HFJ_RESOURCE");
|
||||
hfjResource.modifyColumn("20231018.2", "FHIR_ID").nonNullable();
|
||||
// commented out to make numeric space for the fix task below.
|
||||
// This constraint can't be enabled until the column is fully populated, and the shipped version of 20231018.1
|
||||
// was broken.
|
||||
// hfjResource.modifyColumn("20231018.2", "FHIR_ID").nonNullable();
|
||||
|
||||
// this was inserted after the release.
|
||||
version.addTask(new ForceIdMigrationFixTask(version.getRelease(), "20231018.3"));
|
||||
|
||||
// added back in place of 20231018.2. If 20231018.2 already ran, this is a no-op.
|
||||
hfjResource.modifyColumn("20231018.4", "FHIR_ID").nonNullable();
|
||||
|
||||
hfjResource.dropIndex("20231027.1", "IDX_RES_FHIR_ID");
|
||||
hfjResource
|
||||
|
@ -187,6 +197,8 @@ public class HapiFhirJpaMigrationTasks extends BaseMigrationTasks<VersionEnum> {
|
|||
"SP_URI".toLowerCase()),
|
||||
"Column HFJ_SPIDX_STRING.SP_VALUE_NORMALIZED already has a collation of 'C' so doing nothing");
|
||||
}
|
||||
|
||||
version.addTask(new ForceIdMigrationFixTask(version.getRelease(), "20231213.1"));
|
||||
}
|
||||
|
||||
protected void init680() {
|
||||
|
|
|
@ -19,16 +19,18 @@ import org.junit.jupiter.params.ParameterizedTest;
|
|||
import org.junit.jupiter.params.provider.ArgumentsSource;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.testcontainers.junit.jupiter.Testcontainers;
|
||||
|
||||
import javax.sql.DataSource;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
import static ca.uhn.fhir.jpa.embedded.HapiEmbeddedDatabasesExtension.FIRST_TESTED_VERSION;
|
||||
import static ca.uhn.fhir.jpa.migrate.SchemaMigrator.HAPI_FHIR_MIGRATION_TABLENAME;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
|
||||
|
@ -75,7 +77,7 @@ public class HapiSchemaMigrationTest {
|
|||
|
||||
VersionEnum[] allVersions = VersionEnum.values();
|
||||
|
||||
Set<VersionEnum> dataVersions = Set.of(
|
||||
List<VersionEnum> dataVersions = List.of(
|
||||
VersionEnum.V5_2_0,
|
||||
VersionEnum.V5_3_0,
|
||||
VersionEnum.V5_4_0,
|
||||
|
@ -105,6 +107,8 @@ public class HapiSchemaMigrationTest {
|
|||
new HapiForeignKeyIndexHelper()
|
||||
.ensureAllForeignKeysAreIndexed(dataSource);
|
||||
}
|
||||
|
||||
verifyForcedIdMigration(dataSource);
|
||||
}
|
||||
|
||||
private static void migrate(DriverTypeEnum theDriverType, DataSource dataSource, HapiMigrationStorageSvc hapiMigrationStorageSvc, VersionEnum from, VersionEnum to) throws SQLException {
|
||||
|
@ -123,6 +127,19 @@ public class HapiSchemaMigrationTest {
|
|||
schemaMigrator.migrate();
|
||||
}
|
||||
|
||||
/**
|
||||
* For bug https://github.com/hapifhir/hapi-fhir/issues/5546
|
||||
*/
|
||||
private void verifyForcedIdMigration(DataSource theDataSource) throws SQLException {
|
||||
JdbcTemplate jdbcTemplate = new JdbcTemplate(theDataSource);
|
||||
@SuppressWarnings("DataFlowIssue")
|
||||
int nullCount = jdbcTemplate.queryForObject("select count(1) from hfj_resource where fhir_id is null", Integer.class);
|
||||
assertEquals(0, nullCount, "no fhir_id should be null");
|
||||
int trailingSpaceCount = jdbcTemplate.queryForObject("select count(1) from hfj_resource where fhir_id <> trim(fhir_id)", Integer.class);
|
||||
assertEquals(0, trailingSpaceCount, "no fhir_id should contain a space");
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testCreateMigrationTableIfRequired() throws SQLException {
|
||||
// Setup
|
||||
|
|
|
@ -252,6 +252,7 @@ public class RuleBuilder implements IAuthRuleBuilder {
|
|||
private final String myRuleName;
|
||||
private RuleBuilderRuleOp myReadRuleBuilder;
|
||||
private RuleBuilderRuleOp myWriteRuleBuilder;
|
||||
private RuleBuilderBulkExport ruleBuilderBulkExport;
|
||||
|
||||
RuleBuilderRule(PolicyEnum theRuleMode, String theRuleName) {
|
||||
myRuleMode = theRuleMode;
|
||||
|
@ -333,7 +334,10 @@ public class RuleBuilder implements IAuthRuleBuilder {
|
|||
|
||||
@Override
|
||||
public IAuthRuleBuilderRuleBulkExport bulkExport() {
|
||||
return new RuleBuilderBulkExport();
|
||||
if (ruleBuilderBulkExport == null) {
|
||||
ruleBuilderBulkExport = new RuleBuilderBulkExport();
|
||||
}
|
||||
return ruleBuilderBulkExport;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -859,6 +863,7 @@ public class RuleBuilder implements IAuthRuleBuilder {
|
|||
}
|
||||
|
||||
private class RuleBuilderBulkExport implements IAuthRuleBuilderRuleBulkExport {
|
||||
private RuleBulkExportImpl ruleBulkExport;
|
||||
|
||||
@Override
|
||||
public IAuthRuleBuilderRuleBulkExportWithTarget groupExportOnGroup(@Nonnull String theFocusResourceId) {
|
||||
|
@ -872,12 +877,21 @@ public class RuleBuilder implements IAuthRuleBuilder {
|
|||
|
||||
@Override
|
||||
public IAuthRuleBuilderRuleBulkExportWithTarget patientExportOnPatient(@Nonnull String theFocusResourceId) {
|
||||
RuleBulkExportImpl rule = new RuleBulkExportImpl(myRuleName);
|
||||
rule.setAppliesToPatientExport(theFocusResourceId);
|
||||
rule.setMode(myRuleMode);
|
||||
myRules.add(rule);
|
||||
if (ruleBulkExport == null) {
|
||||
RuleBulkExportImpl rule = new RuleBulkExportImpl(myRuleName);
|
||||
rule.setAppliesToPatientExport(theFocusResourceId);
|
||||
rule.setMode(myRuleMode);
|
||||
ruleBulkExport = rule;
|
||||
} else {
|
||||
ruleBulkExport.setAppliesToPatientExport(theFocusResourceId);
|
||||
}
|
||||
|
||||
return new RuleBuilderBulkExportWithTarget(rule);
|
||||
// prevent duplicate rules being added
|
||||
if (!myRules.contains(ruleBulkExport)) {
|
||||
myRules.add(ruleBulkExport);
|
||||
}
|
||||
|
||||
return new RuleBuilderBulkExportWithTarget(ruleBulkExport);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -27,6 +27,7 @@ import ca.uhn.fhir.rest.api.server.bulk.BulkExportJobParameters;
|
|||
import org.hl7.fhir.instance.model.api.IBaseResource;
|
||||
import org.hl7.fhir.instance.model.api.IIdType;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
@ -40,13 +41,14 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
|
|||
public class RuleBulkExportImpl extends BaseRule {
|
||||
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(RuleBulkExportImpl.class);
|
||||
private String myGroupId;
|
||||
private String myPatientId;
|
||||
private final Collection<String> myPatientIds;
|
||||
private BulkExportJobParameters.ExportStyle myWantExportStyle;
|
||||
private Collection<String> myResourceTypes;
|
||||
private boolean myWantAnyStyle;
|
||||
|
||||
RuleBulkExportImpl(String theRuleName) {
|
||||
super(theRuleName);
|
||||
myPatientIds = new ArrayList<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -111,19 +113,25 @@ public class RuleBulkExportImpl extends BaseRule {
|
|||
}
|
||||
}
|
||||
|
||||
// TODO This is a _bad bad bad implementation_ but we are out of time.
|
||||
// 1. If a claimed resource ID is present in the parameters, and the permission contains one, check for
|
||||
// membership
|
||||
// 2. If not a member, Deny.
|
||||
if (myWantExportStyle == BulkExportJobParameters.ExportStyle.PATIENT && isNotBlank(myPatientId)) {
|
||||
final String expectedPatientId =
|
||||
new IdDt(myPatientId).toUnqualifiedVersionless().getValue();
|
||||
// 1. If each of the requested resource IDs in the parameters are present in the users permissions, Approve
|
||||
// 2. If any requested ID is not present in the users permissions, Deny.
|
||||
if (myWantExportStyle == BulkExportJobParameters.ExportStyle.PATIENT && isNotEmpty(myPatientIds)) {
|
||||
List<String> permittedPatientIds = myPatientIds.stream()
|
||||
.map(id -> new IdDt(id).toUnqualifiedVersionless().getValue())
|
||||
.collect(Collectors.toList());
|
||||
if (!options.getPatientIds().isEmpty()) {
|
||||
ourLog.debug("options.getPatientIds() != null");
|
||||
final String actualPatientIds = options.getPatientIds().stream()
|
||||
List<String> requestedPatientIds = options.getPatientIds().stream()
|
||||
.map(t -> new IdDt(t).toUnqualifiedVersionless().getValue())
|
||||
.collect(Collectors.joining(","));
|
||||
if (actualPatientIds.contains(expectedPatientId)) {
|
||||
.collect(Collectors.toList());
|
||||
boolean requestedPatientsPermitted = true;
|
||||
for (String requestedPatientId : requestedPatientIds) {
|
||||
if (!permittedPatientIds.contains(requestedPatientId)) {
|
||||
requestedPatientsPermitted = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (requestedPatientsPermitted) {
|
||||
return newVerdict(
|
||||
theOperation,
|
||||
theRequestDetails,
|
||||
|
@ -138,8 +146,6 @@ public class RuleBulkExportImpl extends BaseRule {
|
|||
|
||||
final List<String> filters = options.getFilters();
|
||||
|
||||
// TODO: LD: This admittedly adds more to the tech debt above, and should really be addressed by
|
||||
// https://github.com/hapifhir/hapi-fhir/issues/4990
|
||||
if (!filters.isEmpty()) {
|
||||
ourLog.debug("filters not empty");
|
||||
final Set<String> patientIdsInFilters = filters.stream()
|
||||
|
@ -147,7 +153,15 @@ public class RuleBulkExportImpl extends BaseRule {
|
|||
.map(filter -> filter.replace("?_id=", "/"))
|
||||
.collect(Collectors.toUnmodifiableSet());
|
||||
|
||||
if (patientIdsInFilters.contains(expectedPatientId)) {
|
||||
boolean filteredPatientIdsPermitted = true;
|
||||
for (String patientIdInFilters : patientIdsInFilters) {
|
||||
if (!permittedPatientIds.contains(patientIdInFilters)) {
|
||||
filteredPatientIdsPermitted = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (filteredPatientIdsPermitted) {
|
||||
return newVerdict(
|
||||
theOperation,
|
||||
theRequestDetails,
|
||||
|
@ -176,7 +190,7 @@ public class RuleBulkExportImpl extends BaseRule {
|
|||
|
||||
public void setAppliesToPatientExport(String thePatientId) {
|
||||
myWantExportStyle = BulkExportJobParameters.ExportStyle.PATIENT;
|
||||
myPatientId = thePatientId;
|
||||
myPatientIds.add(thePatientId);
|
||||
}
|
||||
|
||||
public void setAppliesToSystem() {
|
||||
|
|
|
@ -10,6 +10,7 @@ import java.util.List;
|
|||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class RuleBuilderTest {
|
||||
|
@ -87,6 +88,19 @@ public class RuleBuilderTest {
|
|||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBulkExport_PatientExportOnPatient_MultiplePatientsSingleRule() {
|
||||
RuleBuilder builder = new RuleBuilder();
|
||||
List<String> resourceTypes = new ArrayList<>();
|
||||
resourceTypes.add("Patient");
|
||||
|
||||
builder.allow().bulkExport().patientExportOnPatient("Patient/pat1").withResourceTypes(resourceTypes);
|
||||
builder.allow().bulkExport().patientExportOnPatient("Patient/pat2").withResourceTypes(resourceTypes);
|
||||
List<IAuthRule> rules = builder.build();
|
||||
assertEquals(rules.size(),1);
|
||||
assertTrue(rules.get(0) instanceof RuleBulkExportImpl);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNullConditional() {
|
||||
IAuthRuleBuilder ruleBuilder = new RuleBuilder().allow().metadata().andThen();
|
||||
|
|
|
@ -249,4 +249,99 @@ public class RuleBulkExportImplTest {
|
|||
//Then: The patient IDs do NOT match so this is not permitted.
|
||||
assertEquals(PolicyEnum.DENY, verdict.getDecision());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPatientExportRulesOnTypeLevelExportUnpermittedPatient() {
|
||||
//Given
|
||||
final RuleBulkExportImpl myRule = new RuleBulkExportImpl("b");
|
||||
myRule.setAppliesToPatientExport("Patient/123");
|
||||
myRule.setMode(PolicyEnum.ALLOW);
|
||||
final BulkExportJobParameters options = new BulkExportJobParameters();
|
||||
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
|
||||
options.setPatientIds(Set.of("Patient/456"));
|
||||
options.setResourceTypes(Set.of("Patient"));
|
||||
when(myRequestDetails.getAttribute(any())).thenReturn(options);
|
||||
|
||||
//When
|
||||
final AuthorizationInterceptor.Verdict verdict = myRule.applyRule(myOperation, myRequestDetails, null, null, null, myRuleApplier, myFlags, myPointcut);
|
||||
|
||||
//Then: We do not have permissions on the requested patient so this is not permitted.
|
||||
assertEquals(PolicyEnum.DENY, verdict.getDecision());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPatientExportRulesOnTypeLevelExportPermittedPatient() {
|
||||
//Given
|
||||
final RuleBulkExportImpl myRule = new RuleBulkExportImpl("b");
|
||||
myRule.setAppliesToPatientExport("Patient/123");
|
||||
myRule.setMode(PolicyEnum.ALLOW);
|
||||
final BulkExportJobParameters options = new BulkExportJobParameters();
|
||||
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
|
||||
options.setPatientIds(Set.of("Patient/123"));
|
||||
options.setResourceTypes(Set.of("Patient"));
|
||||
when(myRequestDetails.getAttribute(any())).thenReturn(options);
|
||||
|
||||
//When
|
||||
final AuthorizationInterceptor.Verdict verdict = myRule.applyRule(myOperation, myRequestDetails, null, null, null, myRuleApplier, myFlags, myPointcut);
|
||||
|
||||
//Then: We have permissions on the requested patient so this is permitted.
|
||||
assertEquals(PolicyEnum.ALLOW, verdict.getDecision());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPatientExportRulesOnTypeLevelExportPermittedPatients() {
|
||||
//Given
|
||||
final RuleBulkExportImpl myRule = new RuleBulkExportImpl("b");
|
||||
myRule.setAppliesToPatientExport("Patient/123");
|
||||
myRule.setAppliesToPatientExport("Patient/456");
|
||||
myRule.setMode(PolicyEnum.ALLOW);
|
||||
final BulkExportJobParameters options = new BulkExportJobParameters();
|
||||
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
|
||||
options.setPatientIds(Set.of("Patient/123", "Patient/456"));
|
||||
options.setResourceTypes(Set.of("Patient"));
|
||||
when(myRequestDetails.getAttribute(any())).thenReturn(options);
|
||||
|
||||
//When
|
||||
final AuthorizationInterceptor.Verdict verdict = myRule.applyRule(myOperation, myRequestDetails, null, null, null, myRuleApplier, myFlags, myPointcut);
|
||||
|
||||
//Then: We have permissions on both requested patients so this is permitted.
|
||||
assertEquals(PolicyEnum.ALLOW, verdict.getDecision());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPatientExportRulesOnTypeLevelExportWithPermittedAndUnpermittedPatients() {
|
||||
//Given
|
||||
final RuleBulkExportImpl myRule = new RuleBulkExportImpl("b");
|
||||
myRule.setAppliesToPatientExport("Patient/123");
|
||||
myRule.setMode(PolicyEnum.ALLOW);
|
||||
final BulkExportJobParameters options = new BulkExportJobParameters();
|
||||
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
|
||||
options.setPatientIds(Set.of("Patient/123","Patient/456"));
|
||||
options.setResourceTypes(Set.of("Patient"));
|
||||
when(myRequestDetails.getAttribute(any())).thenReturn(options);
|
||||
|
||||
//When
|
||||
final AuthorizationInterceptor.Verdict verdict = myRule.applyRule(myOperation, myRequestDetails, null, null, null, myRuleApplier, myFlags, myPointcut);
|
||||
|
||||
//Then: There are unpermitted patients in the request so this is not permitted.
|
||||
assertEquals(PolicyEnum.DENY, verdict.getDecision());
|
||||
}
|
||||
@Test
|
||||
public void testPatientExportRulesOnTypeLevelExportWithPermittedAndUnpermittedPatientFilters() {
|
||||
//Given
|
||||
final RuleBulkExportImpl myRule = new RuleBulkExportImpl("b");
|
||||
myRule.setAppliesToPatientExport("Patient/123");
|
||||
myRule.setMode(PolicyEnum.ALLOW);
|
||||
final BulkExportJobParameters options = new BulkExportJobParameters();
|
||||
options.setExportStyle(BulkExportJobParameters.ExportStyle.PATIENT);
|
||||
options.setFilters(Set.of("Patient?_id=123","Patient?_id=456"));
|
||||
options.setResourceTypes(Set.of("Patient"));
|
||||
when(myRequestDetails.getAttribute(any())).thenReturn(options);
|
||||
|
||||
//When
|
||||
final AuthorizationInterceptor.Verdict verdict = myRule.applyRule(myOperation, myRequestDetails, null, null, null, myRuleApplier, myFlags, myPointcut);
|
||||
|
||||
//Then: There are unpermitted patients in the request so this is not permitted.
|
||||
assertEquals(PolicyEnum.DENY, verdict.getDecision());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ public class ForceIdMigrationCopyTask extends BaseTask {
|
|||
"update hfj_resource " + "set fhir_id = coalesce( "
|
||||
+ // use first non-null value: forced_id if present, otherwise res_id
|
||||
" (select f.forced_id from hfj_forced_id f where f.resource_pid = res_id), "
|
||||
+ " cast(res_id as char(64)) "
|
||||
+ " cast(res_id as varchar(64)) "
|
||||
+ " ) "
|
||||
+ "where fhir_id is null "
|
||||
+ "and res_id >= ? and res_id < ?",
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR Server - SQL Migration
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2023 Smile CDR, Inc.
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
package ca.uhn.fhir.jpa.migrate.taskdef;
|
||||
|
||||
import org.apache.commons.lang3.builder.EqualsBuilder;
|
||||
import org.apache.commons.lang3.builder.HashCodeBuilder;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
|
||||
import java.sql.SQLException;
|
||||
|
||||
/**
|
||||
* Fix for bad version of {@link ForceIdMigrationCopyTask}
|
||||
* The earlier migration had used at cast to char instead of varchar, which is space-padded on Oracle.
|
||||
* This migration includes the copy action, but also adds a trim() call to fixup the bad server-assigned ids.
|
||||
*/
|
||||
public class ForceIdMigrationFixTask extends BaseTask {
|
||||
private static final Logger ourLog = LoggerFactory.getLogger(ForceIdMigrationFixTask.class);
|
||||
|
||||
public ForceIdMigrationFixTask(String theProductVersion, String theSchemaVersion) {
|
||||
super(theProductVersion, theSchemaVersion);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void validate() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute() throws SQLException {
|
||||
logInfo(ourLog, "Starting: migrate fhir_id from hfj_forced_id to hfj_resource.fhir_id");
|
||||
|
||||
JdbcTemplate jdbcTemplate = newJdbcTemplate();
|
||||
|
||||
Pair<Long, Long> range = jdbcTemplate.queryForObject(
|
||||
"select min(RES_ID), max(RES_ID) from HFJ_RESOURCE",
|
||||
(rs, rowNum) -> Pair.of(rs.getLong(1), rs.getLong(2)));
|
||||
|
||||
if (range == null || range.getLeft() == null) {
|
||||
logInfo(ourLog, "HFJ_RESOURCE is empty. No work to do.");
|
||||
return;
|
||||
}
|
||||
|
||||
// run update in batches.
|
||||
int rowsPerBlock = 50; // hfj_resource has roughly 50 rows per 8k block.
|
||||
int batchSize = rowsPerBlock * 2000; // a few thousand IOPS gives a batch size around a second.
|
||||
ourLog.info(
|
||||
"About to migrate ids from {} to {} in batches of size {}",
|
||||
range.getLeft(),
|
||||
range.getRight(),
|
||||
batchSize);
|
||||
for (long batchStart = range.getLeft(); batchStart <= range.getRight(); batchStart = batchStart + batchSize) {
|
||||
long batchEnd = batchStart + batchSize;
|
||||
ourLog.info("Migrating client-assigned ids for pids: {}-{}", batchStart, batchEnd);
|
||||
|
||||
/*
|
||||
We have several cases. Two require no action:
|
||||
1. client-assigned id, with correct value in fhir_id and row in hfj_forced_id
|
||||
2. server-assigned id, with correct value in fhir_id, no row in hfj_forced_id
|
||||
And three require action:
|
||||
3. client-assigned id, no value in fhir_id, but row in hfj_forced_id
|
||||
4. server-assigned id, no value in fhir_id, and row in hfj_forced_id
|
||||
5. bad migration - server-assigned id, with wrong space-padded value in fhir_id, no row in hfj_forced_id
|
||||
*/
|
||||
|
||||
executeSql(
|
||||
"hfj_resource",
|
||||
"update hfj_resource " +
|
||||
// coalesce is varargs and chooses the first non-null value, like ||
|
||||
" set fhir_id = coalesce( "
|
||||
+
|
||||
// case 5.
|
||||
" trim(fhir_id), "
|
||||
+
|
||||
// case 3
|
||||
" (select f.forced_id from hfj_forced_id f where f.resource_pid = res_id), "
|
||||
+
|
||||
// case 4 - use pid as fhir_id
|
||||
" cast(res_id as varchar(64)) "
|
||||
+ " ) "
|
||||
+
|
||||
// avoid useless updates on engines that don't check
|
||||
// skip case 1, 2. Only check 3,4,5
|
||||
" where (fhir_id is null or fhir_id <> trim(fhir_id)) "
|
||||
+
|
||||
// chunk range.
|
||||
" and res_id >= ? and res_id < ?",
|
||||
batchStart,
|
||||
batchEnd);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void generateHashCode(HashCodeBuilder theBuilder) {
|
||||
// no-op - this is a singleton.
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void generateEquals(EqualsBuilder theBuilder, BaseTask theOtherObject) {
|
||||
// no-op - this is a singleton.
|
||||
}
|
||||
}
|
|
@ -36,7 +36,7 @@ import ca.uhn.fhir.rest.server.exceptions.ResourceVersionConflictException;
|
|||
import ca.uhn.fhir.rest.server.servlet.ServletRequestDetails;
|
||||
import ca.uhn.fhir.rest.server.util.CompositeInterceptorBroadcaster;
|
||||
import ca.uhn.fhir.util.ICallable;
|
||||
import ca.uhn.fhir.util.TestUtil;
|
||||
import ca.uhn.fhir.util.SleepUtil;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import jakarta.annotation.Nonnull;
|
||||
import jakarta.annotation.Nullable;
|
||||
|
@ -48,6 +48,7 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.dao.DataIntegrityViolationException;
|
||||
import org.springframework.dao.PessimisticLockingFailureException;
|
||||
import org.springframework.orm.ObjectOptimisticLockingFailureException;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
import org.springframework.transaction.TransactionStatus;
|
||||
|
@ -89,11 +90,18 @@ public class HapiTransactionService implements IHapiTransactionService {
|
|||
|
||||
private Propagation myTransactionPropagationWhenChangingPartitions = Propagation.REQUIRED;
|
||||
|
||||
private SleepUtil mySleepUtil = new SleepUtil();
|
||||
|
||||
@VisibleForTesting
|
||||
public void setInterceptorBroadcaster(IInterceptorBroadcaster theInterceptorBroadcaster) {
|
||||
myInterceptorBroadcaster = theInterceptorBroadcaster;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public void setSleepUtil(SleepUtil theSleepUtil) {
|
||||
mySleepUtil = theSleepUtil;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IExecutionBuilder withRequest(@Nullable RequestDetails theRequestDetails) {
|
||||
return new ExecutionBuilder(theRequestDetails);
|
||||
|
@ -281,6 +289,25 @@ public class HapiTransactionService implements IHapiTransactionService {
|
|||
return doExecuteInTransaction(theExecutionBuilder, theCallback, requestPartitionId, previousRequestPartitionId);
|
||||
}
|
||||
|
||||
private boolean isThrowableOrItsSubclassPresent(Throwable theThrowable, Class<? extends Throwable> theClass) {
|
||||
return ExceptionUtils.indexOfType(theThrowable, theClass) != -1;
|
||||
}
|
||||
|
||||
private boolean isThrowablePresent(Throwable theThrowable, Class<? extends Throwable> theClass) {
|
||||
return ExceptionUtils.indexOfThrowable(theThrowable, theClass) != -1;
|
||||
}
|
||||
|
||||
private boolean isRetriable(Throwable theThrowable) {
|
||||
return isThrowablePresent(theThrowable, ResourceVersionConflictException.class)
|
||||
|| isThrowablePresent(theThrowable, DataIntegrityViolationException.class)
|
||||
|| isThrowablePresent(theThrowable, ConstraintViolationException.class)
|
||||
|| isThrowablePresent(theThrowable, ObjectOptimisticLockingFailureException.class)
|
||||
// calling isThrowableOrItsSubclassPresent instead of isThrowablePresent for
|
||||
// PessimisticLockingFailureException, because we want to retry on its subclasses as well, especially
|
||||
// CannotAcquireLockException, which is thrown in some deadlock situations which we want to retry
|
||||
|| isThrowableOrItsSubclassPresent(theThrowable, PessimisticLockingFailureException.class);
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private <T> T doExecuteInTransaction(
|
||||
ExecutionBuilder theExecutionBuilder,
|
||||
|
@ -294,11 +321,7 @@ public class HapiTransactionService implements IHapiTransactionService {
|
|||
return doExecuteCallback(theExecutionBuilder, theCallback);
|
||||
|
||||
} catch (Exception e) {
|
||||
if (!(ExceptionUtils.indexOfThrowable(e, ResourceVersionConflictException.class) != -1
|
||||
|| ExceptionUtils.indexOfThrowable(e, DataIntegrityViolationException.class) != -1
|
||||
|| ExceptionUtils.indexOfThrowable(e, ConstraintViolationException.class) != -1
|
||||
|| ExceptionUtils.indexOfThrowable(e, ObjectOptimisticLockingFailureException.class)
|
||||
!= -1)) {
|
||||
if (!isRetriable(e)) {
|
||||
ourLog.debug("Unexpected transaction exception. Will not be retried.", e);
|
||||
throw e;
|
||||
} else {
|
||||
|
@ -354,7 +377,7 @@ public class HapiTransactionService implements IHapiTransactionService {
|
|||
}
|
||||
double sleepAmount = (250.0d * i) * Math.random();
|
||||
long sleepAmountLong = (long) sleepAmount;
|
||||
TestUtil.sleepAtLeast(sleepAmountLong, false);
|
||||
mySleepUtil.sleepAtLeast(sleepAmountLong, false);
|
||||
|
||||
ourLog.info(
|
||||
"About to start a transaction retry due to conflict or constraint error. Sleeping {}ms first.",
|
||||
|
|
|
@ -0,0 +1,194 @@
|
|||
package ca.uhn.fhir.jpa.dao.tx;

import ca.uhn.fhir.interceptor.api.HookParams;
import ca.uhn.fhir.interceptor.api.IInterceptorBroadcaster;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.jpa.api.model.ResourceVersionConflictResolutionStrategy;
import ca.uhn.fhir.jpa.model.config.PartitionSettings;
import ca.uhn.fhir.jpa.partition.IRequestPartitionHelperSvc;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.api.server.SystemRequestDetails;
import ca.uhn.fhir.rest.api.server.storage.TransactionDetails;
import ca.uhn.fhir.rest.server.exceptions.ResourceVersionConflictException;
import ca.uhn.fhir.util.SleepUtil;
import org.hibernate.exception.ConstraintViolationException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.dao.CannotAcquireLockException;
import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.orm.ObjectOptimisticLockingFailureException;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;

import java.sql.SQLException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.params.provider.Arguments.arguments;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;

/**
 * Unit tests for the transaction-retry logic of {@link HapiTransactionService}.
 * The {@link SleepUtil} collaborator is mocked so the tests never actually sleep
 * between simulated retries.
 */
@ExtendWith(MockitoExtension.class)
class HapiTransactionServiceTest {

	@Mock
	private IInterceptorBroadcaster myInterceptorBroadcasterMock;

	@Mock
	private PlatformTransactionManager myTransactionManagerMock;

	@Mock
	private IRequestPartitionHelperSvc myRequestPartitionHelperSvcMock;

	@Mock
	private PartitionSettings myPartitionSettingsMock;

	@Mock
	private SleepUtil mySleepUtilMock;

	private HapiTransactionService myHapiTransactionService;

	@BeforeEach
	public void beforeEach() {
		myHapiTransactionService = new HapiTransactionService();
		myHapiTransactionService.setTransactionManager(myTransactionManagerMock);
		myHapiTransactionService.setInterceptorBroadcaster(myInterceptorBroadcasterMock);
		myHapiTransactionService.setPartitionSettingsForUnitTest(myPartitionSettingsMock);
		myHapiTransactionService.setRequestPartitionSvcForUnitTest(myRequestPartitionHelperSvcMock);
		myHapiTransactionService.setSleepUtil(mySleepUtilMock);
		mockInterceptorBroadcaster();
	}

	/**
	 * Stubs the STORAGE_VERSION_CONFLICT hook to answer with whatever retry
	 * settings were passed in as HookParams, so each test controls retry
	 * behavior through its RequestDetails.
	 */
	private void mockInterceptorBroadcaster() {
		lenient().when(myInterceptorBroadcasterMock.callHooksAndReturnObject(eq(Pointcut.STORAGE_VERSION_CONFLICT),
				isA(HookParams.class)))
			.thenAnswer(invocationOnMock -> {
				HookParams hookParams = (HookParams) invocationOnMock.getArguments()[1];
				// answer with whatever retry settings passed in as HookParam
				RequestDetails requestDetails = hookParams.get(RequestDetails.class);
				ResourceVersionConflictResolutionStrategy answer = new ResourceVersionConflictResolutionStrategy();
				answer.setRetry(requestDetails.isRetry());
				answer.setMaxRetries(requestDetails.getMaxRetries());
				return answer;
			});
	}

	/**
	 * A helper method to test retry logic on exceptions.
	 * The TransactionCallback interface allows only throwing RuntimeExceptions,
	 * which is why the parameter type is RuntimeException.
	 *
	 * @param theException the exception the transaction callback throws on every invocation
	 * @param theRetryEnabled whether retry is enabled on the request
	 * @param theMaxRetries the maximum number of retries configured on the request
	 * @param theExpectedNumberOfCallsToTransactionCallback how many times the callback is expected to run
	 * @return the exception that {@code doExecute} ultimately threw
	 */
	private Exception testRetriesOnException(RuntimeException theException,
											 boolean theRetryEnabled,
											 int theMaxRetries,
											 int theExpectedNumberOfCallsToTransactionCallback) {
		RequestDetails requestDetails = new SystemRequestDetails();
		requestDetails.setRetry(theRetryEnabled);
		requestDetails.setMaxRetries(theMaxRetries);

		HapiTransactionService.IExecutionBuilder executionBuilder = myHapiTransactionService
			.withRequest(requestDetails)
			.withTransactionDetails(new TransactionDetails());

		AtomicInteger numberOfCalls = new AtomicInteger();
		TransactionCallback<Void> transactionCallback = (TransactionStatus theStatus) -> {
			numberOfCalls.incrementAndGet();
			throw theException;
		};

		Exception theExceptionThrownByDoExecute = assertThrows(Exception.class, () -> {
			myHapiTransactionService.doExecute((HapiTransactionService.ExecutionBuilder) executionBuilder, transactionCallback);
		});

		assertEquals(theExpectedNumberOfCallsToTransactionCallback, numberOfCalls.get());
		// one sleep before each retry, i.e. one fewer than the number of callback invocations
		verify(mySleepUtilMock, times(theExpectedNumberOfCallsToTransactionCallback - 1))
			.sleepAtLeast(anyLong(), anyBoolean());
		return theExceptionThrownByDoExecute;
	}

	private static Stream<Arguments> provideRetriableExceptionParameters() {
		String exceptionMessage = "failed!";
		return Stream.of(
			arguments(new ResourceVersionConflictException(exceptionMessage)),
			arguments(new DataIntegrityViolationException(exceptionMessage)),
			arguments(new ConstraintViolationException(exceptionMessage, new SQLException(""), null)),
			arguments(new ObjectOptimisticLockingFailureException(exceptionMessage, new Exception())),
			// CannotAcquireLockException is a subclass of
			// PessimisticLockingFailureException which we treat as a retriable exception
			arguments(new CannotAcquireLockException(exceptionMessage))
		);
	}

	@ParameterizedTest(name = "{index}: {0}")
	@MethodSource(value = "provideRetriableExceptionParameters")
	void testDoExecute_WhenRetryEnabled_RetriesOnRetriableExceptions(RuntimeException theException) {
		testRetriesOnException(theException, true, 2, 3);
	}

	@ParameterizedTest(name = "{index}: {0}")
	@MethodSource(value = "provideRetriableExceptionParameters")
	void testDoExecute_WhenRetryEnabled_RetriesOnRetriableInnerExceptions(RuntimeException theException) {
		// in this test we wrap the retriable exception to test that nested exceptions are covered as well
		RuntimeException theWrapperException = new RuntimeException("this is the wrapper", theException);
		testRetriesOnException(theWrapperException, true, 2, 3);
	}

	@ParameterizedTest(name = "{index}: {0}")
	@MethodSource(value = "provideRetriableExceptionParameters")
	void testDoExecute_WhenRetryIsDisabled_DoesNotRetryExceptions(RuntimeException theException) {
		testRetriesOnException(theException, false, 10, 1);
	}

	@Test
	void testDoExecute_WhenRetryEnabled_DoesNotRetryOnNonRetriableException() {
		RuntimeException nonRetriableException = new RuntimeException("should not be retried");
		Exception exceptionThrown = testRetriesOnException(nonRetriableException, true, 10, 1);
		assertEquals(nonRetriableException, exceptionThrown);
		// non-retriable exceptions must propagate without consulting the version-conflict hook
		verifyNoInteractions(myInterceptorBroadcasterMock);
	}

	@Test
	void testDoExecute_WhenRetryEnabled_StopsRetryingWhenARetryIsSuccessful() {
		RequestDetails requestDetails = new SystemRequestDetails();
		requestDetails.setRetry(true);
		requestDetails.setMaxRetries(10);

		HapiTransactionService.IExecutionBuilder executionBuilder = myHapiTransactionService
			.withRequest(requestDetails)
			.withTransactionDetails(new TransactionDetails());

		AtomicInteger numberOfCalls = new AtomicInteger();
		TransactionCallback<Void> transactionCallback = (TransactionStatus theStatus) -> {
			int currentCallNum = numberOfCalls.incrementAndGet();
			// fail for the first two calls then succeed on the third
			if (currentCallNum < 3) {
				// using ResourceVersionConflictException, since it is a retriable exception
				throw new ResourceVersionConflictException("failed");
			}
			return null;
		};

		myHapiTransactionService.doExecute((HapiTransactionService.ExecutionBuilder) executionBuilder, transactionCallback);

		assertEquals(3, numberOfCalls.get());
		verify(mySleepUtilMock, times(2))
			.sleepAtLeast(anyLong(), anyBoolean());
	}
}
|
|
@ -71,7 +71,7 @@
|
|||
<version>${project.version}</version>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
|
||||
|
||||
<dependency>
|
||||
<groupId>commons-codec</groupId>
|
||||
<artifactId>commons-codec</artifactId>
|
||||
|
|
Loading…
Reference in New Issue