Merge pull request #1270 from DressedUpZebra/setJournalDiskSyncStrategy-in-unit-tests

[AMQ-9542] Replace use of deprecated setEnableJournalDiskSyncs
Christopher L. Shannon 2024-12-18 18:25:24 -05:00 committed by GitHub
commit 322c84d5c2
5 changed files with 10 additions and 12 deletions
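
The change is the same in every file: the deprecated boolean toggle setEnableJournalDiskSyncs is replaced by an explicit journal disk sync strategy, with JournalDiskSyncStrategy.NEVER standing in where the tests previously passed false. A minimal standalone sketch of the replacement pattern (the class name and data directory below are illustrative, not part of the patch):

    import java.io.File;
    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
    import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;

    public class JournalSyncStrategyExample {
        public static void main(String[] args) throws Exception {
            KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
            adapter.setDirectory(new File("target/example-kahadb")); // illustrative directory

            // Deprecated: adapter.setEnableJournalDiskSyncs(false);
            // Replacement: name an explicit strategy; NEVER skips the per-write
            // journal sync, matching the old "false" setting used in these tests.
            adapter.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());

            BrokerService broker = new BrokerService();
            broker.setPersistenceAdapter(adapter);
            broker.start();
            broker.waitUntilStarted();
            // ... exercise the broker ...
            broker.stop();
            broker.waitUntilStopped();
        }
    }

The enum also offers ALWAYS and PERIODIC; the tests here only need NEVER, trading journal durability for speed as the removed comments note.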

AMQ2512Test.java

@@ -38,6 +38,7 @@ import jakarta.jms.TextMessage;
 import org.apache.activemq.ActiveMQConnectionFactory;
 import org.apache.activemq.broker.BrokerService;
 import org.apache.activemq.store.kahadb.KahaDBStore;
+import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
 import org.apache.activemq.util.IOHelper;
 import org.junit.After;
 import org.junit.Before;
@@ -185,7 +186,7 @@ public class AMQ2512Test {
         KahaDBStore kaha = new KahaDBStore();
         kaha.setDirectory(dataFileDir);
-        kaha.setEnableJournalDiskSyncs(false);
+        kaha.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());
         BrokerService answer = new BrokerService();
         answer.setPersistenceAdapter(kaha);

AMQ2616Test.java

@@ -34,6 +34,7 @@ import org.apache.activemq.broker.region.policy.PolicyEntry;
 import org.apache.activemq.broker.region.policy.PolicyMap;
 import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
+import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
 import org.apache.activemq.util.IOHelper;
 import org.apache.activemq.util.Wait;
 import org.junit.After;
@@ -92,7 +93,7 @@ public class AMQ2616Test {
         brokerService = new BrokerService();
         KahaDBPersistenceAdapter adaptor = new KahaDBPersistenceAdapter();
-        adaptor.setEnableJournalDiskSyncs(false);
+        adaptor.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());
         File file = new File("target/AMQ2616Test");
         IOHelper.mkdirs(file);
         IOHelper.deleteChildren(file);

VerifySteadyEnqueueRate.java

@@ -20,6 +20,7 @@ import junit.framework.TestCase;
 import org.apache.activemq.ActiveMQConnectionFactory;
 import org.apache.activemq.broker.BrokerService;
 import org.apache.activemq.store.kahadb.KahaDBStore;
+import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -132,10 +133,7 @@ public class VerifySteadyEnqueueRate extends TestCase {
         KahaDBStore kaha = new KahaDBStore();
         kaha.setDirectory(new File("target/activemq-data/kahadb"));
-        // The setEnableJournalDiskSyncs(false) setting is a little dangerous right now, as I have not verified
-        // what happens if the index is updated but a journal update is lost.
-        // Index is going to be in consistent, but can it be repaired?
-        kaha.setEnableJournalDiskSyncs(false);
+        kaha.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());
         // Using a bigger journal file size makes he take fewer spikes as it is not switching files as often.
         kaha.setJournalMaxFileLength(1024*1024*100);

KahaDBQueueTest.java

@@ -19,6 +19,7 @@ package org.apache.activemq.perf;
 import java.io.File;
 import org.apache.activemq.broker.BrokerService;
 import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
+import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
 
 /**
  *
@@ -40,11 +41,7 @@ public class KahaDBQueueTest extends SimpleQueueTest {
         kaha.setDirectory(dataFileDir);
         kaha.setDirectoryArchive(archiveDir);
         kaha.setArchiveDataLogs(false);
-        // The setEnableJournalDiskSyncs(false) setting is a little dangerous right now, as I have not verified
-        // what happens if the index is updated but a journal update is lost.
-        // Index is going to be in consistent, but can it be repaired?
-        kaha.setEnableJournalDiskSyncs(true);
+        kaha.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());
         // Using a bigger journal file size makes he take fewer spikes as it is not switching files as often.
         //kaha.setJournalMaxFileLength(1024*1024*100);

KahaDBFastEnqueueTest.java

@@ -42,6 +42,7 @@ import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.command.ConnectionControl;
 import org.apache.activemq.store.kahadb.disk.journal.FileAppender;
 import org.apache.activemq.store.kahadb.disk.journal.Journal;
+import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -213,7 +214,7 @@ public class KahaDBFastEnqueueTest {
         broker = new BrokerService();
         broker.setDeleteAllMessagesOnStartup(deleteAllMessages);
         kahaDBPersistenceAdapter = (KahaDBPersistenceAdapter)broker.getPersistenceAdapter();
-        kahaDBPersistenceAdapter.setEnableJournalDiskSyncs(false);
+        kahaDBPersistenceAdapter.setJournalDiskSyncStrategy(JournalDiskSyncStrategy.NEVER.name());
         // defer checkpoints which require a sync
         kahaDBPersistenceAdapter.setCleanupInterval(checkPointPeriod);
         kahaDBPersistenceAdapter.setCheckpointInterval(checkPointPeriod);