Ks 20221031 migration lock (#4224)

* started design

* complete with tests

* changelog

* cleanup

* typo

Co-authored-by: Ken Stevens <ken@smilecdr.com>
This commit is contained in:
Ken Stevens 2022-11-01 10:38:30 -04:00 committed by GitHub
parent 6a657d46da
commit a0183608f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 109 additions and 18 deletions

View File

@ -0,0 +1,4 @@
---
type: add
issue: 4224
title: "Added new System Property called 'CLEAR_LOCK_TABLE_WITH_DESCRIPTION' that when set to the uuid of a lock record, will clear that lock record before attempting to insert a new one."

View File

@ -21,18 +21,25 @@ package ca.uhn.fhir.jpa.migrate;
*/
import ca.uhn.fhir.i18n.Msg;
import ca.uhn.fhir.jpa.migrate.entity.HapiMigrationEntity;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Optional;
import java.util.UUID;
import static org.apache.commons.lang3.StringUtils.isBlank;
/**
* The approach used in this class is borrowed from org.flywaydb.community.database.ignite.thin.IgniteThinDatabase
*/
public class HapiMigrationLock implements AutoCloseable {
// Sentinel INSTALLED_RANK value used to mark the lock row in the migration table
static final Integer LOCK_PID = -100;
private static final Logger ourLog = LoggerFactory.getLogger(HapiMigrationLock.class);
public static final int SLEEP_MILLIS_BETWEEN_LOCK_RETRIES = 1000;
// NOTE(review): the next two constants look like the removed/added pair from a diff
// rendering (MAX_RETRY_ATTEMPTS renamed to DEFAULT_MAX_RETRY_ATTEMPTS) — TODO confirm
public static final int MAX_RETRY_ATTEMPTS = 50;
public static final int DEFAULT_MAX_RETRY_ATTEMPTS = 50;
// Mutable so tests can shorten the wait; reset via setMaxRetryAttempts(DEFAULT_MAX_RETRY_ATTEMPTS)
public static int ourMaxRetryAttempts = DEFAULT_MAX_RETRY_ATTEMPTS;
// Name of the System property / environment variable that requests clearing a stale lock row
public static final String CLEAR_LOCK_TABLE_WITH_DESCRIPTION = "CLEAR_LOCK_TABLE_WITH_DESCRIPTION";
// Unique per-process lock identity stored in the lock row's DESCRIPTION column
private final String myLockDescription = UUID.randomUUID().toString();
@ -47,6 +54,7 @@ public class HapiMigrationLock implements AutoCloseable {
}
// Acquires the migration table lock: optionally clears a stale lock on request,
// then retries inserting the lock row up to ourMaxRetryAttempts times, sleeping
// between attempts; throws HapiMigrationException (HAPI-2153) on failure.
// NOTE(review): the "@ -..." line and adjacent duplicated statements below are
// removed/added residue from a diff rendering — TODO confirm against the repository.
private void lock() {
cleanLockTableIfRequested();
int retryCount = 0;
do {
@ -55,24 +63,57 @@ public class HapiMigrationLock implements AutoCloseable {
return;
}
retryCount++;
ourLog.info("Waiting for lock on {}. Retry {}/{}", myMigrationStorageSvc.getMigrationTablename(), retryCount, MAX_RETRY_ATTEMPTS);
Thread.sleep(SLEEP_MILLIS_BETWEEN_LOCK_RETRIES);
// Only log + sleep if another attempt will actually be made
if (retryCount < ourMaxRetryAttempts) {
ourLog.info("Waiting for lock on {}. Retry {}/{}", myMigrationStorageSvc.getMigrationTablename(), retryCount, ourMaxRetryAttempts);
Thread.sleep(SLEEP_MILLIS_BETWEEN_LOCK_RETRIES);
}
} catch (InterruptedException ex) {
// Ignore - if interrupted, we still need to wait for lock to become available
}
} while (retryCount < MAX_RETRY_ATTEMPTS);
} while (retryCount < ourMaxRetryAttempts);
throw new HapiMigrationException(Msg.code(2153) + "Unable to obtain table lock - another database migration may be running. If no " +
// Build the failure message, including the competing lock's DESCRIPTION (if one
// is found) so the operator knows which row to clear
String message = "Unable to obtain table lock - another database migration may be running. If no " +
"other database migration is running, then the previous migration did not shut down properly and the " +
"lock record needs to be deleted manually. The lock record is located in the " + myMigrationStorageSvc.getMigrationTablename() + " table with " +
"INSTALLED_RANK = " + HapiMigrationStorageSvc.LOCK_PID);
"INSTALLED_RANK = " + LOCK_PID;
Optional<HapiMigrationEntity> otherLockFound = myMigrationStorageSvc.findFirstByPidAndNotDescription(LOCK_PID, myLockDescription);
if (otherLockFound.isPresent()) {
message += " and DESCRIPTION = " + otherLockFound.get().getDescription();
}
throw new HapiMigrationException(Msg.code(2153) + message);
}
/**
 * Deletes a stale migration lock row when the operator has explicitly requested it.
 * <p>
 * The lock description (a UUID) is read first from the System property
 * {@code CLEAR_LOCK_TABLE_WITH_DESCRIPTION}, then from the environment variable of
 * the same name. If neither is set, nothing is deleted.
 *
 * @return whether a lock record was successfully deleted
 */
boolean cleanLockTableIfRequested() {
// System property takes precedence over the environment variable
String description = System.getProperty(CLEAR_LOCK_TABLE_WITH_DESCRIPTION);
if (isBlank(description)) {
description = System.getenv(CLEAR_LOCK_TABLE_WITH_DESCRIPTION);
}
if (isBlank(description)) {
// No clear was requested; leave any existing lock row alone
return false;
}
ourLog.info("Repairing lock table. Removing row in " + myMigrationStorageSvc.getMigrationTablename() + " with INSTALLED_RANK = " + LOCK_PID + " and DESCRIPTION = " + description);
boolean result = myMigrationStorageSvc.deleteLockRecord(description);
if (result) {
ourLog.info("Successfully removed lock record");
} else {
// The requested DESCRIPTION did not match any lock row
ourLog.info("No lock record found");
}
return result;
}
// Attempts to insert this process's lock row; returns false (rather than
// propagating) on any failure, e.g. a unique-key violation when another
// process already holds the lock.
private boolean insertLockingRow() {
try {
return myMigrationStorageSvc.insertLockRecord(myLockDescription);
} catch (Exception e) {
// NOTE(review): the next two lines appear to be the removed/added pair from a
// diff rendering (log level lowered from warn to debug) — TODO confirm
ourLog.warn("Failed to insert lock record: {}", e.getMessage());
ourLog.debug("Failed to insert lock record: {}", e.getMessage());
return false;
}
}
@ -84,4 +125,8 @@ public class HapiMigrationLock implements AutoCloseable {
ourLog.error("Failed to delete migration lock record for description = [{}]", myLockDescription);
}
}
/**
 * Overrides the lock-acquisition retry limit. Intended for tests, which restore
 * it to {@code DEFAULT_MAX_RETRY_ATTEMPTS} afterwards.
 *
 * @param theMaxRetryAttempts the new maximum number of retry attempts
 */
public static void setMaxRetryAttempts(int theMaxRetryAttempts) {
ourMaxRetryAttempts = theMaxRetryAttempts;
}
}

View File

@ -32,7 +32,6 @@ import java.util.Set;
public class HapiMigrationStorageSvc {
public static final String UNKNOWN_VERSION = "unknown";
// TYPE column value identifying lock rows
private static final String LOCK_TYPE = "hapi-fhir-lock";
// NOTE(review): the next line appears to be the removed side of a diff — the
// constant was moved to HapiMigrationLock.LOCK_PID — TODO confirm
static final Integer LOCK_PID = -100;
private final HapiMigrationDao myHapiMigrationDao;
@ -104,11 +103,11 @@ public class HapiMigrationStorageSvc {
verifyNoOtherLocksPresent(theLockDescription);
// Remove the locking row
return myHapiMigrationDao.deleteLockRecord(LOCK_PID, theLockDescription);
return myHapiMigrationDao.deleteLockRecord(HapiMigrationLock.LOCK_PID, theLockDescription);
}
void verifyNoOtherLocksPresent(String theLockDescription) {
Optional<HapiMigrationEntity> otherLockFound = myHapiMigrationDao.findFirstByPidAndNotDescription(LOCK_PID, theLockDescription);
Optional<HapiMigrationEntity> otherLockFound = myHapiMigrationDao.findFirstByPidAndNotDescription(HapiMigrationLock.LOCK_PID, theLockDescription);
// Check that there are no other locks in place. This should not happen!
if (otherLockFound.isPresent()) {
@ -118,7 +117,7 @@ public class HapiMigrationStorageSvc {
/**
 * Inserts the migration-table lock row: pid = HapiMigrationLock.LOCK_PID,
 * type = LOCK_TYPE, description = the caller's unique lock UUID.
 *
 * @param theLockDescription unique identifier of the lock being taken
 * @return the result of the DAO save call
 */
public boolean insertLockRecord(String theLockDescription) {
HapiMigrationEntity entity = new HapiMigrationEntity();
// NOTE(review): the next two lines are the removed/added pair from a diff
// rendering (LOCK_PID relocated to HapiMigrationLock) — TODO confirm
entity.setPid(LOCK_PID);
entity.setPid(HapiMigrationLock.LOCK_PID);
entity.setType(LOCK_TYPE);
entity.setDescription(theLockDescription);
entity.setExecutionTime(0);
@ -126,4 +125,8 @@
return myHapiMigrationDao.save(entity);
}
/**
 * Delegates to the DAO: returns the first row with the given pid whose description
 * differs from {@code theLockDescription} — i.e. a lock held by a different process.
 *
 * @param theLockPid         INSTALLED_RANK sentinel identifying lock rows
 * @param theLockDescription this process's own lock UUID, to be excluded
 */
public Optional<HapiMigrationEntity> findFirstByPidAndNotDescription(Integer theLockPid, String theLockDescription) {
return myHapiMigrationDao.findFirstByPidAndNotDescription(theLockPid, theLockDescription);
}
}

View File

@ -1,9 +1,9 @@
package ca.uhn.fhir.jpa.migrate;
import ca.uhn.fhir.interceptor.api.HookParams;
import ca.uhn.fhir.jpa.migrate.dao.MigrationQueryBuilder;
import ca.uhn.fhir.jpa.migrate.entity.HapiMigrationEntity;
import ca.uhn.fhir.jpa.migrate.dao.HapiMigrationDao;
import ca.uhn.fhir.jpa.migrate.taskdef.BaseTask;
import ca.uhn.fhir.jpa.migrate.taskdef.NopTask;
import ca.uhn.test.concurrency.IPointcutLatch;
import ca.uhn.test.concurrency.PointcutLatch;
import org.apache.commons.dbcp2.BasicDataSource;
@ -18,6 +18,7 @@ import org.springframework.jdbc.core.JdbcTemplate;
import javax.annotation.Nonnull;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@ -27,6 +28,7 @@ import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
class HapiMigratorIT {
private static final Logger ourLog = LoggerFactory.getLogger(HapiMigratorIT.class);
@ -34,6 +36,7 @@ class HapiMigratorIT {
private final BasicDataSource myDataSource = BaseMigrationTest.getDataSource();
private final JdbcTemplate myJdbcTemplate = new JdbcTemplate(myDataSource);
private HapiMigrationStorageSvc myMigrationStorageSvc;
// Creates the migration table before each test and wires up a storage service
// backed by an H2 DAO. NOTE(review): the "@ -..." line is a diff hunk marker;
// earlier setup statements are outside this view.
@BeforeEach
void before() {
@ -41,12 +44,17 @@ class HapiMigratorIT {
migrator.createMigrationTableIfRequired();
// Sanity check: table creation must have recorded at least one row
Integer count = myJdbcTemplate.queryForObject("SELECT COUNT(*) FROM " + MIGRATION_TABLENAME, Integer.class);
assertTrue(count > 0);
HapiMigrationDao migrationDao = new HapiMigrationDao(myDataSource, DriverTypeEnum.H2_EMBEDDED, MIGRATION_TABLENAME);
myMigrationStorageSvc = new HapiMigrationStorageSvc(migrationDao);
}
// Tears down per-test state: drops the migration table, verifies no connections
// leaked, and restores the static retry limit and System property that
// individual tests may have modified.
@AfterEach
void after() {
myJdbcTemplate.execute("DROP TABLE " + MIGRATION_TABLENAME);
assertEquals(0, myDataSource.getNumActive());
HapiMigrationLock.setMaxRetryAttempts(HapiMigrationLock.DEFAULT_MAX_RETRY_ATTEMPTS);
System.clearProperty(HapiMigrationLock.CLEAR_LOCK_TABLE_WITH_DESCRIPTION);
}
@Test
@ -78,8 +86,7 @@ class HapiMigratorIT {
LatchMigrationTask latchMigrationTask2 = new LatchMigrationTask("second new", "2");
LatchMigrationTask latchMigrationTask3 = new LatchMigrationTask("third repeat", "1");
HapiMigrator migrator2 = buildMigrator(latchMigrationTask2);
migrator2.addTask(latchMigrationTask3);
HapiMigrator migrator2 = buildMigrator(latchMigrationTask2, latchMigrationTask3);
// We only expect the first migration to run because the second one will block on the lock and by the time the lock
// is released, the first one will have already run so there will be nothing to do
@ -141,14 +148,46 @@ class HapiMigratorIT {
}
// Verifies that a stale lock row (left by a previous migration) makes
// migrate() fail with HAPI-2153 and that the error message names the
// offending lock's DESCRIPTION so the operator knows which row to clear.
@Test
void test_oldLockFails_block() {
// Disable retries so the failure is immediate
HapiMigrationLock.setMaxRetryAttempts(0);
String description = UUID.randomUUID().toString();
HapiMigrator migrator = buildMigrator();
// Simulate a lock left behind by a migration that did not shut down properly
myMigrationStorageSvc.insertLockRecord(description);
try {
migrator.migrate();
fail();
} catch (HapiMigrationException e) {
assertEquals("HAPI-2153: Unable to obtain table lock - another database migration may be running. If no other database migration is running, then the previous migration did not shut down properly and the lock record needs to be deleted manually. The lock record is located in the TEST_MIGRATOR_TABLE table with INSTALLED_RANK = -100 and DESCRIPTION = " + description, e.getMessage());
}
}
// Verifies the repair path: setting the CLEAR_LOCK_TABLE_WITH_DESCRIPTION
// System property to the stale lock's UUID lets the migrator delete that row
// and proceed, so the single queued task succeeds.
@Test
void test_oldLockWithSystemProperty_cleared() {
HapiMigrationLock.setMaxRetryAttempts(0);
String description = UUID.randomUUID().toString();
HapiMigrator migrator = buildMigrator(new NopTask("1", "1"));
// Stale lock row that would otherwise block the migration
myMigrationStorageSvc.insertLockRecord(description);
System.setProperty(HapiMigrationLock.CLEAR_LOCK_TABLE_WITH_DESCRIPTION, description);
MigrationResult result = migrator.migrate();
assertThat(result.succeededTasks, hasSize(1));
}
// Counts lock rows in the test migration table (rows whose installed_rank is
// the LOCK_PID sentinel).
private int countLockRecords() {
// NOTE(review): the next two lines are the removed/added pair from a diff
// rendering (LOCK_PID reference moved to HapiMigrationLock) — TODO confirm
return myJdbcTemplate.queryForObject("SELECT COUNT(*) FROM " + MIGRATION_TABLENAME + " WHERE \"installed_rank\" = " + HapiMigrationStorageSvc.LOCK_PID, Integer.class);
return myJdbcTemplate.queryForObject("SELECT COUNT(*) FROM " + MIGRATION_TABLENAME + " WHERE \"installed_rank\" = " + HapiMigrationLock.LOCK_PID, Integer.class);
}
// Builds a migrator pre-loaded with the given tasks.
// NOTE(review): the two signatures and the two task-registration forms below are
// the removed/added pairs from a diff rendering (single LatchMigrationTask
// parameter generalized to a BaseTask varargs) — TODO confirm against the repository.
@Nonnull
private HapiMigrator buildMigrator(LatchMigrationTask theLatchMigrationTask) {
private HapiMigrator buildMigrator(BaseTask... theTasks) {
HapiMigrator retval = buildMigrator();
retval.addTask(theLatchMigrationTask);
for (BaseTask next : theTasks) {
retval.addTask(next);
}
return retval;
}