STORE_TYPE_LIST = new ArrayList<>();
+
+ static {
+ STORE_TYPE_LIST.add("database-store");
+ STORE_TYPE_LIST.add("file-store");
+ }
+
+ private void parseStoreConfiguration(final Element e, final Configuration mainConfig) {
+ for (String storeType : STORE_TYPE_LIST) {
+ NodeList storeNodeList = e.getElementsByTagName(storeType);
+ if (storeNodeList.getLength() > 0) {
+ Element storeNode = (Element) storeNodeList.item(0);
+ if (storeNode.getTagName().equals("database-store")) {
+ mainConfig.setStoreConfiguration(createDatabaseStoreConfig(storeNode));
+ }
+ else if (storeNode.getTagName().equals("file-store")) {
+ mainConfig.setStoreConfiguration(createFileStoreConfig(storeNode));
+ }
+ }
+ }
+ }
+
private void parseHAPolicyConfiguration(final Element e, final Configuration mainConfig) {
for (String haType : HA_LIST) {
NodeList haNodeList = e.getElementsByTagName(haType);
@@ -1105,6 +1135,20 @@ public final class FileConfigurationParser extends XMLConfigurationUtil {
return null;
}
+ private DatabaseStorageConfiguration createDatabaseStoreConfig(Element storeNode) {
+ NodeList databaseStoreNode = storeNode.getElementsByTagName("database-store");
+
+ DatabaseStorageConfiguration conf = new DatabaseStorageConfiguration();
+ conf.setBindingsTableName(getString(storeNode, "bindings-table-name", conf.getBindingsTableName(), Validators.NO_CHECK));
+ conf.setMessageTableName(getString(storeNode, "message-table-name", conf.getMessageTableName(), Validators.NO_CHECK));
+ conf.setJdbcConnectionUrl(getString(storeNode, "jdbc-connection-url", conf.getJdbcConnectionUrl(), Validators.NO_CHECK));
+ return conf;
+ }
+
/**
 * Builds the (currently option-less) file-store configuration.
 *
 * @param storeNode the file-store element; unused today, kept for symmetry with
 *                  {@code createDatabaseStoreConfig} and future file-store options
 * @return a default {@code FileStorageConfiguration}
 */
private FileStorageConfiguration createFileStoreConfig(Element storeNode) {
   return new FileStorageConfiguration();
}
+
private void parseBroadcastGroupConfiguration(final Element e, final Configuration mainConfig) {
String name = e.getAttribute("name");
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AbstractJournalStorageManager.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AbstractJournalStorageManager.java
new file mode 100644
index 0000000000..be457c4d5c
--- /dev/null
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AbstractJournalStorageManager.java
@@ -0,0 +1,1828 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.activemq.artemis.core.persistence.impl.journal;
+
+import javax.transaction.xa.Xid;
+import java.io.File;
+import java.io.FileInputStream;
+import java.security.AccessController;
+import java.security.DigestInputStream;
+import java.security.InvalidParameterException;
+import java.security.MessageDigest;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
+import org.apache.activemq.artemis.api.core.ActiveMQBuffers;
+import org.apache.activemq.artemis.api.core.Pair;
+import org.apache.activemq.artemis.api.core.SimpleString;
+import org.apache.activemq.artemis.core.config.Configuration;
+import org.apache.activemq.artemis.core.filter.Filter;
+import org.apache.activemq.artemis.core.io.IOCallback;
+import org.apache.activemq.artemis.core.io.IOCriticalErrorListener;
+import org.apache.activemq.artemis.core.journal.Journal;
+import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
+import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
+import org.apache.activemq.artemis.core.journal.RecordInfo;
+import org.apache.activemq.artemis.core.paging.PageTransactionInfo;
+import org.apache.activemq.artemis.core.paging.PagingManager;
+import org.apache.activemq.artemis.core.paging.PagingStore;
+import org.apache.activemq.artemis.core.paging.cursor.PagePosition;
+import org.apache.activemq.artemis.core.paging.cursor.PageSubscription;
+import org.apache.activemq.artemis.core.paging.cursor.PagedReferenceImpl;
+import org.apache.activemq.artemis.core.paging.impl.PageTransactionInfoImpl;
+import org.apache.activemq.artemis.core.persistence.GroupingInfo;
+import org.apache.activemq.artemis.core.persistence.OperationContext;
+import org.apache.activemq.artemis.core.persistence.QueueBindingInfo;
+import org.apache.activemq.artemis.core.persistence.StorageManager;
+import org.apache.activemq.artemis.core.persistence.config.PersistedAddressSetting;
+import org.apache.activemq.artemis.core.persistence.config.PersistedRoles;
+import org.apache.activemq.artemis.core.persistence.impl.PageCountPending;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.CursorAckRecordEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DeleteEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DeliveryCountUpdateEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DuplicateIDEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.FinishPageMessageOperation;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.GroupingEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.HeuristicCompletionEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.LargeMessageEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountPendingImpl;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecord;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageUpdateTXEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PendingLargeMessageEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PersistentQueueBindingEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.RefEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.ScheduledDeliveryEncoding;
+import org.apache.activemq.artemis.core.persistence.impl.journal.codec.XidEncoding;
+import org.apache.activemq.artemis.core.postoffice.Binding;
+import org.apache.activemq.artemis.core.postoffice.DuplicateIDCache;
+import org.apache.activemq.artemis.core.postoffice.PostOffice;
+import org.apache.activemq.artemis.core.server.ActiveMQMessageBundle;
+import org.apache.activemq.artemis.core.server.ActiveMQServerLogger;
+import org.apache.activemq.artemis.core.server.LargeServerMessage;
+import org.apache.activemq.artemis.core.server.MessageReference;
+import org.apache.activemq.artemis.core.server.Queue;
+import org.apache.activemq.artemis.core.server.RouteContextList;
+import org.apache.activemq.artemis.core.server.ServerMessage;
+import org.apache.activemq.artemis.core.server.group.impl.GroupBinding;
+import org.apache.activemq.artemis.core.server.impl.JournalLoader;
+import org.apache.activemq.artemis.core.server.impl.ServerMessageImpl;
+import org.apache.activemq.artemis.core.transaction.ResourceManager;
+import org.apache.activemq.artemis.core.transaction.Transaction;
+import org.apache.activemq.artemis.core.transaction.TransactionPropertyIndexes;
+import org.apache.activemq.artemis.core.transaction.impl.TransactionImpl;
+import org.apache.activemq.artemis.utils.ActiveMQThreadFactory;
+import org.apache.activemq.artemis.utils.Base64;
+import org.apache.activemq.artemis.utils.ExecutorFactory;
+import org.apache.activemq.artemis.utils.IDGenerator;
+
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.ACKNOWLEDGE_CURSOR;
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.ADD_LARGE_MESSAGE_PENDING;
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.DUPLICATE_ID;
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.PAGE_CURSOR_COUNTER_INC;
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE;
+import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME;
+
+/**
+ * Controls access to the journals and other storage files such as the ones used to store pages and
+ * large messages. This class must control writing of any non-transient data, as it is the key point
+ * for synchronizing any replicating backup server.
+ *
+ * Using this class also ensures that locks are acquired in the right order, avoiding dead-locks.
+ */
+public abstract class AbstractJournalStorageManager implements StorageManager {
+
/**
 * Identifies which journal a record or file belongs to; the byte value is the
 * wire/persisted discriminator for each journal.
 */
public enum JournalContent {
   BINDINGS((byte) 0), MESSAGES((byte) 1);

   public final byte typeByte;

   JournalContent(byte b) {
      typeByte = b;
   }

   /**
    * Resolves a persisted discriminator byte back to its journal.
    *
    * @throws InvalidParameterException if the byte matches no journal
    */
   public static JournalContent getType(byte type) {
      for (JournalContent content : values()) {
         if (content.typeByte == type) {
            return content;
         }
      }
      throw new InvalidParameterException("invalid byte: " + type);
   }
}
+
+ private static final long CHECKPOINT_BATCH_SIZE = Integer.MAX_VALUE;
+
+ protected Semaphore pageMaxConcurrentIO;
+
+ protected BatchingIDGenerator idGenerator;
+
+ protected final ReentrantReadWriteLock storageManagerLock = new ReentrantReadWriteLock(true);
+
+ protected Journal messageJournal;
+
+ protected Journal bindingsJournal;
+
+ protected volatile boolean started;
+
+ /**
+ * Used to create Operation Contexts
+ */
+ private final ExecutorFactory executorFactory;
+
+ final Executor executor;
+
+ ExecutorService singleThreadExecutor;
+
+ private final boolean syncTransactional;
+
+ private final boolean syncNonTransactional;
+
+ protected int perfBlastPages = -1;
+
+ protected boolean journalLoaded = false;
+
+ private final IOCriticalErrorListener ioCriticalErrorListener;
+
+ protected final Configuration config;
+
+ // Persisted core configuration
+ protected final Map mapPersistedRoles = new ConcurrentHashMap<>();
+
+ protected final Map mapPersistedAddressSettings = new ConcurrentHashMap<>();
+
+ protected final Set largeMessagesToDelete = new HashSet<>();
+
/**
 * Creates a storage manager with no critical-error listener.
 * NOTE(review): with a null listener, {@code criticalError} will NPE if ever invoked —
 * callers using this constructor should never trigger a critical error path.
 */
public AbstractJournalStorageManager(final Configuration config, final ExecutorFactory executorFactory) {
   this(config, executorFactory, null);
}

/**
 * Creates the storage manager, reads the sync flags from the configuration and
 * delegates journal/store setup to the subclass via {@link #init}.
 * NOTE(review): {@code init} is an overridable (abstract) method called from this
 * constructor; subclass implementations run before the subclass's own fields are
 * initialized — implementations must rely only on the arguments they receive.
 *
 * @param config                the broker configuration (never read again for sync flags after this)
 * @param executorFactory       source of executors for operation contexts
 * @param criticalErrorListener may be null (see two-argument constructor)
 */
public AbstractJournalStorageManager(Configuration config,
                                     ExecutorFactory executorFactory,
                                     IOCriticalErrorListener criticalErrorListener) {
   this.executorFactory = executorFactory;

   this.ioCriticalErrorListener = criticalErrorListener;

   this.config = config;

   executor = executorFactory.getExecutor();

   syncNonTransactional = config.isJournalSyncNonTransactional();
   syncTransactional = config.isJournalSyncTransactional();

   init(config, criticalErrorListener);

   // ID generation starts at 0; persisted counters are reloaded later during journal load.
   idGenerator = new BatchingIDGenerator(0, CHECKPOINT_BATCH_SIZE, this);
}
+
/**
 * Called during initialization. Used by implementations to setup Journals, Stores etc...
 * Implementations must assign {@code messageJournal} and {@code bindingsJournal}
 * before the journals are used — TODO confirm against concrete subclasses.
 *
 * @param config                the broker configuration
 * @param criticalErrorListener listener for unrecoverable I/O errors; may be null
 */
protected abstract void init(Configuration config, IOCriticalErrorListener criticalErrorListener);
+
+ @Override
+ public void criticalError(Throwable error) {
+ ioCriticalErrorListener.onIOException(error, error.getMessage(), null);
+ }
+
/** Detaches the current thread from its operation context. */
@Override
public void clearContext() {
   OperationContextImpl.clearContext();
}
+
+ public static String md5(File file) {
+ try {
+ byte[] buffer = new byte[1 << 4];
+ MessageDigest md = MessageDigest.getInstance("MD5");
+
+ FileInputStream is = new FileInputStream(file);
+ DigestInputStream is2 = new DigestInputStream(is, md);
+ while (is2.read(buffer) > 0) {
+ continue;
+ }
+ byte[] digest = md.digest();
+ is.close();
+ is2.close();
+ return Base64.encodeBytes(digest);
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
/** @return the shared batching ID generator */
public IDGenerator getIDGenerator() {
   return idGenerator;
}

/**
 * Blocks until all pending operations on the current context complete.
 *
 * @throws IllegalStateException if the server is stopped
 */
@Override
public final void waitOnOperations() throws Exception {
   if (!started) {
      ActiveMQServerLogger.LOGGER.serverIsStopped();
      throw new IllegalStateException("Server is stopped");
   }
   waitOnOperations(0);
}

/**
 * Waits up to {@code timeout} ms for pending operations on the current context.
 *
 * @param timeout maximum wait in milliseconds; 0 waits indefinitely — TODO confirm
 *                against OperationContext.waitCompletion semantics
 * @return true if the operations completed within the timeout
 * @throws IllegalStateException if the server is stopped
 */
@Override
public final boolean waitOnOperations(final long timeout) throws Exception {
   if (!started) {
      ActiveMQServerLogger.LOGGER.serverIsStopped();
      throw new IllegalStateException("Server is stopped");
   }
   return getContext().waitCompletion(timeout);
}

/** @return the operation context bound to the current thread (created on demand) */
public OperationContext getContext() {
   return OperationContextImpl.getContext(executorFactory);
}

/** Binds the given operation context to the current thread. */
public void setContext(final OperationContext context) {
   OperationContextImpl.setContext(context);
}

public Executor getSingleThreadExecutor() {
   return singleThreadExecutor;
}

/** @return a fresh context backed by the single-thread executor (ordered callbacks) */
public OperationContext newSingleThreadContext() {
   return newContext(singleThreadExecutor);
}

/** @return a fresh context whose completion callbacks run on the given executor */
public OperationContext newContext(final Executor executor1) {
   return new OperationContextImpl(executor1);
}

/** Runs {@code run} once all currently pending operations on this context complete. */
public void afterCompleteOperations(final IOCallback run) {
   getContext().executeOnCompletion(run);
}

/** @return the next unique record ID */
public long generateID() {
   return idGenerator.generateID();
}

public long getCurrentID() {
   return idGenerator.getCurrentID();
}
+
// Non transactional operations


/**
 * Transactionally deletes the pending-large-message record identified by
 * {@code recordID}, registering a TX operation so the on-disk confirmation is
 * tied to the transaction outcome.
 */
public void confirmPendingLargeMessageTX(final Transaction tx, long messageID, long recordID) throws Exception {
   readLock();
   try {
      installLargeMessageConfirmationOnTX(tx, recordID);
      messageJournal.appendDeleteRecordTransactional(tx.getID(), recordID, new DeleteEncoding(JournalRecordIds.ADD_LARGE_MESSAGE_PENDING, messageID));
   }
   finally {
      readUnLock();
   }
}

/**
 * We don't need messageID now but we are likely to need it we ever decide to support a database
 */
public void confirmPendingLargeMessage(long recordID) throws Exception {
   readLock();
   try {
      // sync=true: confirmation must be durable before the context is released
      messageJournal.appendDeleteRecord(recordID, true, getContext());
   }
   finally {
      readUnLock();
   }
}
+
/**
 * Persists a message (large or regular) to the message journal without syncing;
 * durability is deferred to the add-reference that follows.
 *
 * @throws IllegalStateException (via the bundle) if the message has no assigned ID
 */
public void storeMessage(final ServerMessage message) throws Exception {
   if (message.getMessageID() <= 0) {
      // Sanity check only... this shouldn't happen unless there is a bug
      throw ActiveMQMessageBundle.BUNDLE.messageIdNotAssigned();
   }

   readLock();
   try {
      // Note that we don't sync, the add reference that comes immediately after will sync if
      // appropriate

      if (message.isLargeMessage()) {
         messageJournal.appendAddRecord(message.getMessageID(), JournalRecordIds.ADD_LARGE_MESSAGE, new LargeMessageEncoding((LargeServerMessage) message), false, getContext(false));
      }
      else {
         messageJournal.appendAddRecord(message.getMessageID(), JournalRecordIds.ADD_MESSAGE, message, false, getContext(false));
      }
   }
   finally {
      readUnLock();
   }
}

/**
 * Records that a message is referenced by a queue.
 *
 * @param last true when this is the last reference for the message — only then is
 *             a (possibly synced) flush requested
 */
public void storeReference(final long queueID, final long messageID, final boolean last) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecord(messageID, JournalRecordIds.ADD_REF, new RefEncoding(queueID), last && syncNonTransactional, getContext(last && syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/** Acquires the shared storage-manager lock; pair with {@link #readUnLock()} in finally. */
@Override
public void readLock() {
   storageManagerLock.readLock().lock();
}

/** Releases the shared storage-manager lock acquired by {@link #readLock()}. */
@Override
public void readUnLock() {
   storageManagerLock.readLock().unlock();
}
+
/** Records the acknowledgement of a message by a queue in the message journal. */
public void storeAcknowledge(final long queueID, final long messageID) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecord(messageID, JournalRecordIds.ACKNOWLEDGE_REF, new RefEncoding(queueID), syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}

/**
 * Records a paged-message acknowledgement; a fresh record ID is generated and
 * written back into {@code position} so the ack can later be deleted.
 */
public void storeCursorAcknowledge(long queueID, PagePosition position) throws Exception {
   readLock();
   try {
      long ackID = idGenerator.generateID();
      position.setRecordID(ackID);
      messageJournal.appendAddRecord(ackID, JournalRecordIds.ACKNOWLEDGE_CURSOR, new CursorAckRecordEncoding(queueID, position), syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/** Deletes a message record from the journal (never synced — see comment below). */
public void deleteMessage(final long messageID) throws Exception {
   readLock();
   try {
      // Messages are deleted on postACK, one after another.
      // If these deletes are synchronized, we would build up messages on the Executor
      // increasing chances of losing deletes.
      // The StorageManager should verify messages without references
      messageJournal.appendDeleteRecord(messageID, false, getContext(false));
   }
   finally {
      readUnLock();
   }
}

/** Persists the scheduled-delivery time carried by the reference's message record. */
public void updateScheduledDeliveryTime(final MessageReference ref) throws Exception {
   ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue().getID());
   readLock();
   try {
      messageJournal.appendUpdateRecord(ref.getMessage().getMessageID(), JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME, encoding, syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}

/** Persists a duplicate-ID cache entry for the given address under a caller-chosen record ID. */
public void storeDuplicateID(final SimpleString address, final byte[] duplID, final long recordID) throws Exception {
   readLock();
   try {
      DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);

      messageJournal.appendAddRecord(recordID, JournalRecordIds.DUPLICATE_ID, encoding, syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}

/** Deletes a previously stored duplicate-ID record. */
public void deleteDuplicateID(final long recordID) throws Exception {
   readLock();
   try {
      messageJournal.appendDeleteRecord(recordID, syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}
+
// Transactional operations

/**
 * Persists a message (large or regular) inside transaction {@code txID};
 * durability follows the transaction's commit.
 *
 * @throws IllegalStateException (via the bundle) if the message has no assigned ID
 */
public void storeMessageTransactional(final long txID, final ServerMessage message) throws Exception {
   if (message.getMessageID() <= 0) {
      throw ActiveMQMessageBundle.BUNDLE.messageIdNotAssigned();
   }

   readLock();
   try {
      if (message.isLargeMessage()) {
         messageJournal.appendAddRecordTransactional(txID, message.getMessageID(), JournalRecordIds.ADD_LARGE_MESSAGE, new LargeMessageEncoding(((LargeServerMessage) message)));
      }
      else {
         messageJournal.appendAddRecordTransactional(txID, message.getMessageID(), JournalRecordIds.ADD_MESSAGE, message);
      }

   }
   finally {
      readUnLock();
   }
}
+
/**
 * Persists a new page-transaction record inside {@code txID}; a fresh record ID is
 * generated and stored back into {@code pageTransaction}.
 */
public void storePageTransaction(final long txID, final PageTransactionInfo pageTransaction) throws Exception {
   readLock();
   try {
      pageTransaction.setRecordID(generateID());
      messageJournal.appendAddRecordTransactional(txID, pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, pageTransaction);
   }
   finally {
      readUnLock();
   }
}

/**
 * Updates an existing page transaction inside {@code txID} with the number of
 * depaged messages.
 */
public void updatePageTransaction(final long txID,
                                  final PageTransactionInfo pageTransaction,
                                  final int depages) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecordTransactional(txID, pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, new PageUpdateTXEncoding(pageTransaction.getTransactionID(), depages));
   }
   finally {
      readUnLock();
   }
}

/** Non-transactional variant of the page-transaction update above. */
public void updatePageTransaction(final PageTransactionInfo pageTransaction, final int depages) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecord(pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, new PageUpdateTXEncoding(pageTransaction.getTransactionID(), depages), syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/** Records, inside {@code txID}, that a message is referenced by a queue. */
public void storeReferenceTransactional(final long txID, final long queueID, final long messageID) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecordTransactional(txID, messageID, JournalRecordIds.ADD_REF, new RefEncoding(queueID));
   }
   finally {
      readUnLock();
   }
}

/** Records, inside {@code txID}, the acknowledgement of a message by a queue. */
public void storeAcknowledgeTransactional(final long txID,
                                          final long queueID,
                                          final long messageID) throws Exception {
   readLock();
   try {
      messageJournal.appendUpdateRecordTransactional(txID, messageID, JournalRecordIds.ACKNOWLEDGE_REF, new RefEncoding(queueID));
   }
   finally {
      readUnLock();
   }
}

/**
 * Transactional variant of {@code storeCursorAcknowledge}: generates a record ID,
 * stores it back into {@code position}, and appends the ack inside {@code txID}.
 */
public void storeCursorAcknowledgeTransactional(long txID, long queueID, PagePosition position) throws Exception {
   readLock();
   try {
      long ackID = idGenerator.generateID();
      position.setRecordID(ackID);
      messageJournal.appendAddRecordTransactional(txID, ackID, JournalRecordIds.ACKNOWLEDGE_CURSOR, new CursorAckRecordEncoding(queueID, position));
   }
   finally {
      readUnLock();
   }
}
+
+ public void storePageCompleteTransactional(long txID, long queueID, PagePosition position) throws Exception {
+ long recordID = idGenerator.generateID();
+ position.setRecordID(recordID);
+ messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COMPLETE, new CursorAckRecordEncoding(queueID, position));
+ }
+
+ public void deletePageComplete(long ackID) throws Exception {
+ messageJournal.appendDeleteRecord(ackID, false);
+ }
+
+ public void deleteCursorAcknowledgeTransactional(long txID, long ackID) throws Exception {
+ readLock();
+ try {
+ messageJournal.appendDeleteRecordTransactional(txID, ackID);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public void deleteCursorAcknowledge(long ackID) throws Exception {
+ messageJournal.appendDeleteRecord(ackID, false);
+ }
+
/**
 * Durably records a heuristic commit/rollback decision for an XA transaction.
 *
 * @return the generated record ID (needed later to delete the record)
 */
public long storeHeuristicCompletion(final Xid xid, final boolean isCommit) throws Exception {
   readLock();
   try {
      long id = generateID();

      // sync=true: a heuristic decision must survive a crash
      messageJournal.appendAddRecord(id, JournalRecordIds.HEURISTIC_COMPLETION, new HeuristicCompletionEncoding(xid, isCommit), true, getContext(true));
      return id;
   }
   finally {
      readUnLock();
   }
}

/** Durably deletes a previously stored heuristic-completion record. */
public void deleteHeuristicCompletion(final long id) throws Exception {
   readLock();
   try {

      messageJournal.appendDeleteRecord(id, true, getContext(true));
   }
   finally {
      readUnLock();
   }
}

/** Deletes a page-transaction record (no sync). */
public void deletePageTransactional(final long recordID) throws Exception {
   readLock();
   try {
      messageJournal.appendDeleteRecord(recordID, false);
   }
   finally {
      readUnLock();
   }
}
+
/** Transactional variant of {@code updateScheduledDeliveryTime}. */
public void updateScheduledDeliveryTimeTransactional(final long txID, final MessageReference ref) throws Exception {
   ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue().getID());
   readLock();
   try {

      messageJournal.appendUpdateRecordTransactional(txID, ref.getMessage().getMessageID(), JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME, encoding);
   }
   finally {
      readUnLock();
   }
}

/** Writes the XA prepare record for {@code txID}, syncing per the transactional policy. */
public void prepare(final long txID, final Xid xid) throws Exception {
   readLock();
   try {
      messageJournal.appendPrepareRecord(txID, new XidEncoding(xid), syncTransactional, getContext(syncTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/** Commits {@code txID} on the message journal, lining up the current context. */
public void commit(final long txID) throws Exception {
   commit(txID, true);
}

/** Durably commits {@code txID} on the bindings journal. */
public void commitBindings(final long txID) throws Exception {
   bindingsJournal.appendCommitRecord(txID, true);
}

public void rollbackBindings(final long txID) throws Exception {
   // no need to sync, it's going away anyways
   bindingsJournal.appendRollbackRecord(txID, false);
}

/**
 * Commits {@code txID} on the message journal.
 *
 * @param lineUpContext false when the caller already lined up the context elsewhere
 *                      (see the inline comment below for why done() is then forced)
 */
public void commit(final long txID, final boolean lineUpContext) throws Exception {
   readLock();
   try {
      messageJournal.appendCommitRecord(txID, syncTransactional, getContext(syncTransactional), lineUpContext);
      if (!lineUpContext && !syncTransactional) {
         /**
          * If {@code lineUpContext == false}, it means that we have previously lined up a
          * context somewhere else (specifically see @{link TransactionImpl#asyncAppendCommit}),
          * hence we need to mark it as done even if {@code syncTransactional = false} as in this
          * case {@code getContext(syncTransactional=false)} would pass a dummy context to the
          * {@code messageJournal.appendCommitRecord(...)} call above.
          */
         getContext(true).done();
      }
   }
   finally {
      readUnLock();
   }
}

/** Rolls back {@code txID} on the message journal, syncing per the transactional policy. */
public void rollback(final long txID) throws Exception {
   readLock();
   try {
      messageJournal.appendRollbackRecord(txID, syncTransactional, getContext(syncTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/** Transactional variant of {@code storeDuplicateID}. */
public void storeDuplicateIDTransactional(final long txID,
                                          final SimpleString address,
                                          final byte[] duplID,
                                          final long recordID) throws Exception {
   DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);

   readLock();
   try {
      messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.DUPLICATE_ID, encoding);
   }
   finally {
      readUnLock();
   }
}

/** Updates an existing duplicate-ID record inside {@code txID}. */
public void updateDuplicateIDTransactional(final long txID,
                                           final SimpleString address,
                                           final byte[] duplID,
                                           final long recordID) throws Exception {
   DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);

   readLock();
   try {
      messageJournal.appendUpdateRecordTransactional(txID, recordID, JournalRecordIds.DUPLICATE_ID, encoding);
   }
   finally {
      readUnLock();
   }
}

/** Deletes a duplicate-ID record inside {@code txID}. */
public void deleteDuplicateIDTransactional(final long txID, final long recordID) throws Exception {
   readLock();
   try {
      messageJournal.appendDeleteRecordTransactional(txID, recordID);
   }
   finally {
      readUnLock();
   }
}
+
// Other operations

/**
 * Persists the reference's delivery count, skipping the write when the persisted
 * value is already current (see inline comment).
 */
public void updateDeliveryCount(final MessageReference ref) throws Exception {
   // no need to store if it's the same value
   // otherwise the journal will get OME in case of lots of redeliveries
   if (ref.getDeliveryCount() == ref.getPersistedCount()) {
      return;
   }

   ref.setPersistedCount(ref.getDeliveryCount());
   DeliveryCountUpdateEncoding updateInfo = new DeliveryCountUpdateEncoding(ref.getQueue().getID(), ref.getDeliveryCount());

   readLock();
   try {
      messageJournal.appendUpdateRecord(ref.getMessage().getMessageID(), JournalRecordIds.UPDATE_DELIVERY_COUNT, updateInfo, syncNonTransactional, getContext(syncNonTransactional));
   }
   finally {
      readUnLock();
   }
}
+
/**
 * Durably stores an address setting in the bindings journal, first deleting any
 * prior record for the same address match, and caches it in memory.
 */
public void storeAddressSetting(PersistedAddressSetting addressSetting) throws Exception {
   deleteAddressSetting(addressSetting.getAddressMatch());
   readLock();
   try {
      long id = idGenerator.generateID();
      addressSetting.setStoreId(id);
      bindingsJournal.appendAddRecord(id, JournalRecordIds.ADDRESS_SETTING_RECORD, addressSetting, true);
      mapPersistedAddressSettings.put(addressSetting.getAddressMatch(), addressSetting);
   }
   finally {
      readUnLock();
   }
}

// NOTE(review): raw List return type — elements are PersistedAddressSetting values.
public List recoverAddressSettings() throws Exception {
   return new ArrayList<>(mapPersistedAddressSettings.values());
}

// NOTE(review): raw List return type — elements are PersistedRoles values.
public List recoverPersistedRoles() throws Exception {
   return new ArrayList<>(mapPersistedRoles.values());
}

/**
 * Durably stores security roles in the bindings journal, replacing any prior
 * record for the same address match, and caches them in memory.
 */
public void storeSecurityRoles(PersistedRoles persistedRoles) throws Exception {

   deleteSecurityRoles(persistedRoles.getAddressMatch());
   readLock();
   try {
      final long id = idGenerator.generateID();
      persistedRoles.setStoreId(id);
      bindingsJournal.appendAddRecord(id, JournalRecordIds.SECURITY_RECORD, persistedRoles, true);
      mapPersistedRoles.put(persistedRoles.getAddressMatch(), persistedRoles);
   }
   finally {
      readUnLock();
   }
}
+
/** Durably stores an ID-counter checkpoint record in the bindings journal. */
@Override
public void storeID(final long journalID, final long id) throws Exception {
   readLock();
   try {
      bindingsJournal.appendAddRecord(journalID, JournalRecordIds.ID_COUNTER_RECORD, BatchingIDGenerator.createIDEncodingSupport(id), true);
   }
   finally {
      readUnLock();
   }
}

/** Deletes an ID-counter record. NOTE(review): parameter name 'journalD' looks like a typo for 'journalID'. */
@Override
public void deleteID(long journalD) throws Exception {
   readLock();
   try {
      bindingsJournal.appendDeleteRecord(journalD, false);
   }
   finally {
      readUnLock();
   }
}

/**
 * Removes an address setting from the in-memory cache and, if present, deletes its
 * persisted record from the bindings journal.
 */
public void deleteAddressSetting(SimpleString addressMatch) throws Exception {
   PersistedAddressSetting oldSetting = mapPersistedAddressSettings.remove(addressMatch);
   if (oldSetting != null) {
      readLock();
      try {
         bindingsJournal.appendDeleteRecord(oldSetting.getStoreId(), false);
      }
      finally {
         readUnLock();
      }
   }
}

/**
 * Removes security roles from the in-memory cache and, if present, deletes their
 * persisted record from the bindings journal.
 */
public void deleteSecurityRoles(SimpleString addressMatch) throws Exception {
   PersistedRoles oldRoles = mapPersistedRoles.remove(addressMatch);
   if (oldRoles != null) {
      readLock();
      try {
         bindingsJournal.appendDeleteRecord(oldRoles.getStoreId(), false);
      }
      finally {
         readUnLock();
      }
   }
}
+
+ @Override
+ public JournalLoadInformation loadMessageJournal(final PostOffice postOffice,
+ final PagingManager pagingManager,
+ final ResourceManager resourceManager,
+ Map queueInfos,
+ final Map>> duplicateIDMap,
+ final Set> pendingLargeMessages,
+ List pendingNonTXPageCounter,
+ final JournalLoader journalLoader) throws Exception {
+ List records = new ArrayList<>();
+
+ List preparedTransactions = new ArrayList<>();
+
+ Map messages = new HashMap<>();
+ readLock();
+ try {
+
+ JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
+
+ ArrayList largeMessages = new ArrayList<>();
+
+ Map> queueMap = new HashMap<>();
+
+ Map pageSubscriptions = new HashMap<>();
+
+ final int totalSize = records.size();
+
+ for (int reccount = 0; reccount < totalSize; reccount++) {
+ // It will show log.info only with large journals (more than 1 million records)
+ if (reccount > 0 && reccount % 1000000 == 0) {
+ long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
+
+ ActiveMQServerLogger.LOGGER.percentLoaded(percent);
+ }
+
+ RecordInfo record = records.get(reccount);
+ byte[] data = record.data;
+
+ ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
+
+ byte recordType = record.getUserRecordType();
+
+ switch (recordType) {
+ case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING: {
+ PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
+
+ pending.decode(buff);
+
+ if (pendingLargeMessages != null) {
+ // it could be null on tests, and we don't need anything on that case
+ pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
+ }
+ break;
+ }
+ case JournalRecordIds.ADD_LARGE_MESSAGE: {
+ LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
+
+ messages.put(record.id, largeMessage);
+
+ largeMessages.add(largeMessage);
+
+ break;
+ }
+ case JournalRecordIds.ADD_MESSAGE: {
+ ServerMessage message = new ServerMessageImpl(record.id, 50);
+
+ message.decode(buff);
+
+ messages.put(record.id, message);
+
+ break;
+ }
+ case JournalRecordIds.ADD_REF: {
+ long messageID = record.id;
+
+ RefEncoding encoding = new RefEncoding();
+
+ encoding.decode(buff);
+
+ Map queueMessages = queueMap.get(encoding.queueID);
+
+ if (queueMessages == null) {
+ queueMessages = new LinkedHashMap<>();
+
+ queueMap.put(encoding.queueID, queueMessages);
+ }
+
+ ServerMessage message = messages.get(messageID);
+
+ if (message == null) {
+ ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
+ }
+ else {
+ queueMessages.put(messageID, new AddMessageRecord(message));
+ }
+
+ break;
+ }
+ case JournalRecordIds.ACKNOWLEDGE_REF: {
+ long messageID = record.id;
+
+ RefEncoding encoding = new RefEncoding();
+
+ encoding.decode(buff);
+
+ Map queueMessages = queueMap.get(encoding.queueID);
+
+ if (queueMessages == null) {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
+ }
+ else {
+ AddMessageRecord rec = queueMessages.remove(messageID);
+
+ if (rec == null) {
+ ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
+ }
+ }
+
+ break;
+ }
+ case JournalRecordIds.UPDATE_DELIVERY_COUNT: {
+ long messageID = record.id;
+
+ DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
+
+ encoding.decode(buff);
+
+ Map queueMessages = queueMap.get(encoding.queueID);
+
+ if (queueMessages == null) {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
+ }
+ else {
+ AddMessageRecord rec = queueMessages.get(messageID);
+
+ if (rec == null) {
+ ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
+ }
+ else {
+ rec.setDeliveryCount(encoding.count);
+ }
+ }
+
+ break;
+ }
+ case JournalRecordIds.PAGE_TRANSACTION: {
+ if (record.isUpdate) {
+ PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
+
+ pageUpdate.decode(buff);
+
+ PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
+
+ pageTX.onUpdate(pageUpdate.recods, null, null);
+ }
+ else {
+ PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
+
+ pageTransactionInfo.decode(buff);
+
+ pageTransactionInfo.setRecordID(record.id);
+
+ pagingManager.addTransaction(pageTransactionInfo);
+ }
+
+ break;
+ }
+ case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME: {
+ long messageID = record.id;
+
+ ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
+
+ encoding.decode(buff);
+
+ Map queueMessages = queueMap.get(encoding.queueID);
+
+ if (queueMessages == null) {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
+ }
+ else {
+
+ AddMessageRecord rec = queueMessages.get(messageID);
+
+ if (rec == null) {
+ ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
+ }
+ else {
+ rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
+ }
+ }
+
+ break;
+ }
+ case JournalRecordIds.DUPLICATE_ID: {
+ DuplicateIDEncoding encoding = new DuplicateIDEncoding();
+
+ encoding.decode(buff);
+
+ List> ids = duplicateIDMap.get(encoding.address);
+
+ if (ids == null) {
+ ids = new ArrayList<>();
+
+ duplicateIDMap.put(encoding.address, ids);
+ }
+
+ ids.add(new Pair<>(encoding.duplID, record.id));
+
+ break;
+ }
+ case JournalRecordIds.HEURISTIC_COMPLETION: {
+ HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
+ encoding.decode(buff);
+ resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
+ break;
+ }
+ case JournalRecordIds.ACKNOWLEDGE_CURSOR: {
+ CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
+ encoding.decode(buff);
+
+ encoding.position.setRecordID(record.id);
+
+ PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
+
+ if (sub != null) {
+ sub.reloadACK(encoding.position);
+ }
+ else {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
+ messageJournal.appendDeleteRecord(record.id, false);
+
+ }
+
+ break;
+ }
+ case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE: {
+ PageCountRecord encoding = new PageCountRecord();
+
+ encoding.decode(buff);
+
+ PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
+
+ if (sub != null) {
+ sub.getCounter().loadValue(record.id, encoding.getValue());
+ }
+ else {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
+ messageJournal.appendDeleteRecord(record.id, false);
+ }
+
+ break;
+ }
+
+ case JournalRecordIds.PAGE_CURSOR_COUNTER_INC: {
+ PageCountRecordInc encoding = new PageCountRecordInc();
+
+ encoding.decode(buff);
+
+ PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
+
+ if (sub != null) {
+ sub.getCounter().loadInc(record.id, encoding.getValue());
+ }
+ else {
+ ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
+ messageJournal.appendDeleteRecord(record.id, false);
+ }
+
+ break;
+ }
+
+ case JournalRecordIds.PAGE_CURSOR_COMPLETE: {
+ CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
+ encoding.decode(buff);
+
+ encoding.position.setRecordID(record.id);
+
+ PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
+
+ if (sub != null) {
+ sub.reloadPageCompletion(encoding.position);
+ }
+ else {
+ ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
+ messageJournal.appendDeleteRecord(record.id, false);
+ }
+
+ break;
+ }
+
+ case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER: {
+
+ PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
+ pendingCountEncoding.decode(buff);
+ pendingCountEncoding.setID(record.id);
+
+ // This can be null on testcases not interested in this outcome
+ if (pendingNonTXPageCounter != null) {
+ pendingNonTXPageCounter.add(pendingCountEncoding);
+ }
+ break;
+ }
+
+ default: {
+ throw new IllegalStateException("Invalid record type " + recordType);
+ }
+ }
+
+ // This will free up memory sooner. The record is not needed any more
+ // and its byte array would consume memory during the load process even though it's not necessary any longer
+ // what would delay processing time during load
+ records.set(reccount, null);
+ }
+
+ // Release the memory as soon as not needed any longer
+ records.clear();
+ records = null;
+
+ journalLoader.handleAddMessage(queueMap);
+
+ loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
+
+ for (PageSubscription sub : pageSubscriptions.values()) {
+ sub.getCounter().processReload();
+ }
+
+ for (LargeServerMessage msg : largeMessages) {
+ if (msg.getRefCount() == 0) {
+ ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
+ msg.decrementDelayDeletionCount();
+ }
+ }
+
+ journalLoader.handleNoMessageReferences(messages);
+
+ // To recover positions on Iterators
+ if (pagingManager != null) {
+ // it could be null on certain tests that are not dealing with paging
+ // This could also be the case in certain embedded conditions
+ pagingManager.processReload();
+ }
+
+ if (perfBlastPages != -1) {
+ messageJournal.perfBlast(perfBlastPages);
+ }
+
+ journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
+ journalLoaded = true;
+ return info;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ /**
+ * @param queueID
+ * @param pageSubscriptions
+ * @param queueInfos
+ * @return
+ */
+ private static PageSubscription locateSubscription(final long queueID,
+ final Map pageSubscriptions,
+ final Map queueInfos,
+ final PagingManager pagingManager) throws Exception {
+
+ PageSubscription subs = pageSubscriptions.get(queueID);
+ if (subs == null) {
+ QueueBindingInfo queueInfo = queueInfos.get(queueID);
+
+ if (queueInfo != null) {
+ SimpleString address = queueInfo.getAddress();
+ PagingStore store = pagingManager.getPageStore(address);
+ subs = store.getCursorProvider().getSubscription(queueID);
+ pageSubscriptions.put(queueID, subs);
+ }
+ }
+
+ return subs;
+ }
+
+ // grouping handler operations
+ public void addGrouping(final GroupBinding groupBinding) throws Exception {
+ GroupingEncoding groupingEncoding = new GroupingEncoding(groupBinding.getId(), groupBinding.getGroupId(), groupBinding.getClusterName());
+ readLock();
+ try {
+ bindingsJournal.appendAddRecord(groupBinding.getId(), JournalRecordIds.GROUP_RECORD, groupingEncoding, true);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public void deleteGrouping(long tx, final GroupBinding groupBinding) throws Exception {
+ readLock();
+ try {
+ bindingsJournal.appendDeleteRecordTransactional(tx, groupBinding.getId());
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ // BindingsImpl operations
+
+ public void addQueueBinding(final long tx, final Binding binding) throws Exception {
+ Queue queue = (Queue) binding.getBindable();
+
+ Filter filter = queue.getFilter();
+
+ SimpleString filterString = filter == null ? null : filter.getFilterString();
+
+ PersistentQueueBindingEncoding bindingEncoding = new PersistentQueueBindingEncoding(queue.getName(), binding.getAddress(), filterString, queue.getUser(), queue.isAutoCreated());
+
+ readLock();
+ try {
+ bindingsJournal.appendAddRecordTransactional(tx, binding.getID(), JournalRecordIds.QUEUE_BINDING_RECORD, bindingEncoding);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public void deleteQueueBinding(long tx, final long queueBindingID) throws Exception {
+ readLock();
+ try {
+ bindingsJournal.appendDeleteRecordTransactional(tx, queueBindingID);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public long storePageCounterInc(long txID, long queueID, int value) throws Exception {
+ readLock();
+ try {
+ long recordID = idGenerator.generateID();
+ messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_INC, new PageCountRecordInc(queueID, value));
+ return recordID;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public long storePageCounterInc(long queueID, int value) throws Exception {
+ readLock();
+ try {
+ final long recordID = idGenerator.generateID();
+ messageJournal.appendAddRecord(recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_INC, new PageCountRecordInc(queueID, value), true, getContext());
+ return recordID;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ @Override
+ public long storePageCounter(long txID, long queueID, long value) throws Exception {
+ readLock();
+ try {
+ final long recordID = idGenerator.generateID();
+ messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE, new PageCountRecord(queueID, value));
+ return recordID;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ @Override
+ public long storePendingCounter(final long queueID, final long pageID, final int inc) throws Exception {
+ readLock();
+ try {
+ final long recordID = idGenerator.generateID();
+ PageCountPendingImpl pendingInc = new PageCountPendingImpl(queueID, pageID, inc);
+ // We must guarantee the record sync before we actually write on the page otherwise we may get out of sync
+ // on the counter
+ messageJournal.appendAddRecord(recordID, JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER, pendingInc, true);
+ return recordID;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
   /**
    * Deletes the journal record {@code recordID} inside journal transaction
    * {@code txID} (used for page-counter increment records, per the name).
    */
   public void deleteIncrementRecord(long txID, long recordID) throws Exception {
      readLock();
      try {
         messageJournal.appendDeleteRecordTransactional(txID, recordID);
      }
      finally {
         readUnLock();
      }
   }
+
   /**
    * Deletes the journal record {@code recordID} inside journal transaction
    * {@code txID} (used for page-counter value records, per the name).
    */
   public void deletePageCounter(long txID, long recordID) throws Exception {
      readLock();
      try {
         messageJournal.appendDeleteRecordTransactional(txID, recordID);
      }
      finally {
         readUnLock();
      }
   }
+
   /**
    * Deletes the journal record {@code recordID} inside journal transaction
    * {@code txID} (used for pending page-counter records, per the name).
    */
   public void deletePendingPageCounter(long txID, long recordID) throws Exception {
      readLock();
      try {
         messageJournal.appendDeleteRecordTransactional(txID, recordID);
      }
      finally {
         readUnLock();
      }
   }
+
+ public JournalLoadInformation loadBindingJournal(final List queueBindingInfos,
+ final List groupingInfos) throws Exception {
+ List records = new ArrayList();
+
+ List preparedTransactions = new ArrayList();
+
+ JournalLoadInformation bindingsInfo = bindingsJournal.load(records, preparedTransactions, null);
+
+ for (RecordInfo record : records) {
+ long id = record.id;
+
+ ActiveMQBuffer buffer = ActiveMQBuffers.wrappedBuffer(record.data);
+
+ byte rec = record.getUserRecordType();
+
+ if (rec == JournalRecordIds.QUEUE_BINDING_RECORD) {
+ PersistentQueueBindingEncoding bindingEncoding = newBindingEncoding(id, buffer);
+
+ queueBindingInfos.add(bindingEncoding);
+ }
+ else if (rec == JournalRecordIds.ID_COUNTER_RECORD) {
+ idGenerator.loadState(record.id, buffer);
+ }
+ else if (rec == JournalRecordIds.GROUP_RECORD) {
+ GroupingEncoding encoding = newGroupEncoding(id, buffer);
+ groupingInfos.add(encoding);
+ }
+ else if (rec == JournalRecordIds.ADDRESS_SETTING_RECORD) {
+ PersistedAddressSetting setting = newAddressEncoding(id, buffer);
+ mapPersistedAddressSettings.put(setting.getAddressMatch(), setting);
+ }
+ else if (rec == JournalRecordIds.SECURITY_RECORD) {
+ PersistedRoles roles = newSecurityRecord(id, buffer);
+ mapPersistedRoles.put(roles.getAddressMatch(), roles);
+ }
+ else {
+ throw new IllegalStateException("Invalid record type " + rec);
+ }
+ }
+
+ // This will instruct the IDGenerator to beforeStop old records
+ idGenerator.cleanup();
+
+ return bindingsInfo;
+ }
+
   /**
    * Lines up the current operation context against the message journal, under
    * the storage-manager read lock.
    */
   public void lineUpContext() {
      readLock();
      try {
         messageJournal.lineUpContext(getContext());
      }
      finally {
         readUnLock();
      }
   }
+
+ // ActiveMQComponent implementation
+ // ------------------------------------------------------
+
   /** Subclass hook invoked by {@link #start()} before the journals are started. */
   protected abstract void beforeStart() throws Exception;
+
+ public synchronized void start() throws Exception {
+ if (started) {
+ return;
+ }
+
+ beforeStart();
+
+ singleThreadExecutor = Executors.newSingleThreadExecutor(AccessController.doPrivileged(new PrivilegedAction() {
+ @Override
+ public ActiveMQThreadFactory run() {
+ return new ActiveMQThreadFactory("ActiveMQ-IO-SingleThread", true, JournalStorageManager.class.getClassLoader());
+ }
+ }));
+
+ bindingsJournal.start();
+
+ messageJournal.start();
+
+ started = true;
+ }
+
   /** Clean shutdown; equivalent to {@code stop(false)}. */
   public void stop() throws Exception {
      stop(false);
   }
+
   /**
    * Persists the id generator's current value so ids survive a restart.
    * Only runs once the journal has been loaded and a generator exists.
    */
   @Override
   public synchronized void persistIdGenerator() {
      if (journalLoaded && idGenerator != null) {
         // Must call close to make sure last id is persisted
         idGenerator.persistCurrentID();
      }
   }
+
   /**
    * Assumption is that this is only called with a writeLock on the StorageManager.
    * Subclass hook run during clean shutdown, before the id generator is
    * persisted — presumably flushes cached large-message deletes (see name);
    * exact semantics are defined by the concrete storage manager.
    */
   protected abstract void performCachedLargeMessageDeletes();
+
   /**
    * Stops the storage manager. When {@code ioCriticalError} is false this is
    * a clean shutdown: cached large-message deletes are flushed and the
    * current id-generator value persisted. On a critical IO error those steps
    * are skipped so no further IO is attempted against a broken store.
    */
   public synchronized void stop(boolean ioCriticalError) throws Exception {
      if (!started) {
         return;
      }

      if (!ioCriticalError) {
         performCachedLargeMessageDeletes();
         // Must call close to make sure last id is persisted
         if (journalLoaded && idGenerator != null)
            idGenerator.persistCurrentID();
      }

      // Drain the executor: the latch task runs only after all previously
      // submitted work has completed. Bounded wait — NOTE(review): a timeout
      // here is silently ignored and shutdown proceeds anyway.
      final CountDownLatch latch = new CountDownLatch(1);
      executor.execute(new Runnable() {
         @Override
         public void run() {
            latch.countDown();
         }
      });

      latch.await(30, TimeUnit.SECONDS);

      beforeStop();

      bindingsJournal.stop();

      messageJournal.stop();

      singleThreadExecutor.shutdown();

      journalLoaded = false;

      started = false;
   }
+
   /** Subclass hook invoked by {@link #stop(boolean)} after the executor drain, before the journals stop. */
   protected abstract void beforeStop() throws Exception;
+
   /** @return {@code true} between a completed {@link #start()} and the next {@link #stop()} */
   public synchronized boolean isStarted() {
      return started;
   }
+
+ /**
+ * TODO: Is this still being used ?
+ */
+ public JournalLoadInformation[] loadInternalOnly() throws Exception {
+ readLock();
+ try {
+ JournalLoadInformation[] info = new JournalLoadInformation[2];
+ info[0] = bindingsJournal.loadInternalOnly();
+ info[1] = messageJournal.loadInternalOnly();
+
+ return info;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ public void beforePageRead() throws Exception {
+ if (pageMaxConcurrentIO != null) {
+ pageMaxConcurrentIO.acquire();
+ }
+ }
+
+ public void afterPageRead() throws Exception {
+ if (pageMaxConcurrentIO != null) {
+ pageMaxConcurrentIO.release();
+ }
+ }
+
+ // Public -----------------------------------------------------------------------------------
+
   /** @return the journal holding message records */
   public Journal getMessageJournal() {
      return messageJournal;
   }
+
   /** @return the journal holding binding records */
   public Journal getBindingsJournal() {
      return bindingsJournal;
   }
+
+ // Package protected ---------------------------------------------
+
+ protected void confirmLargeMessage(final LargeServerMessage largeServerMessage) {
+ if (largeServerMessage.getPendingRecordID() >= 0) {
+ try {
+ confirmPendingLargeMessage(largeServerMessage.getPendingRecordID());
+ largeServerMessage.setPendingRecordID(-1);
+ }
+ catch (Exception e) {
+ ActiveMQServerLogger.LOGGER.warn(e.getMessage(), e);
+ }
+ }
+ }
+
   /**
    * Decodes a large-message journal record from {@code buff}; implemented by
    * the concrete storage manager. (NOTE(review): {@code messages} is a raw
    * Map here — generics look stripped; likely keyed by record id.)
    */
   protected abstract LargeServerMessage parseLargeMessage(Map messages,
                                                           ActiveMQBuffer buff) throws Exception;
+
   /**
    * Rebuilds in-memory transactions for every prepared (but not yet
    * committed/rolled-back) XA transaction found in the journal. For each one,
    * the records are replayed into a new {@link TransactionImpl}, deleted
    * records are reconciled (large-message pendings), and the result is handed
    * to {@code journalLoader} for final registration with the resource manager.
    */
   private void loadPreparedTransactions(final PostOffice postOffice,
                                         final PagingManager pagingManager,
                                         final ResourceManager resourceManager,
                                         final Map queueInfos,
                                         final List preparedTransactions,
                                         final Map>> duplicateIDMap,
                                         final Map pageSubscriptions,
                                         final Set> pendingLargeMessages,
                                         JournalLoader journalLoader) throws Exception {
      // recover prepared transactions
      for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
         // The XID is stored in the prepare record's extra-data blob
         XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());

         Xid xid = encodingXid.xid;

         Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);

         List referencesToAck = new ArrayList();

         Map messages = new HashMap();

         // Use same method as load message journal to prune out acks, so they don't get added.
         // Then have reacknowledge(tx) methods on queue, which needs to add the page size

         // first get any sent messages for this tx and recreate
         for (RecordInfo record : preparedTransaction.getRecords()) {
            byte[] data = record.data;

            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);

            byte recordType = record.getUserRecordType();

            // Replay each record type into the in-memory transaction
            switch (recordType) {
               case JournalRecordIds.ADD_LARGE_MESSAGE: {
                  messages.put(record.id, parseLargeMessage(messages, buff));

                  break;
               }
               case JournalRecordIds.ADD_MESSAGE: {
                  ServerMessage message = new ServerMessageImpl(record.id, 50);

                  message.decode(buff);

                  messages.put(record.id, message);

                  break;
               }
               case JournalRecordIds.ADD_REF: {
                  long messageID = record.id;

                  RefEncoding encoding = new RefEncoding();

                  encoding.decode(buff);

                  // The referenced message must have been replayed earlier in this tx
                  ServerMessage message = messages.get(messageID);

                  if (message == null) {
                     throw new IllegalStateException("Cannot find message with id " + messageID);
                  }

                  journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);

                  break;
               }
               case JournalRecordIds.ACKNOWLEDGE_REF: {
                  long messageID = record.id;

                  RefEncoding encoding = new RefEncoding();

                  encoding.decode(buff);

                  journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);

                  break;
               }
               case JournalRecordIds.PAGE_TRANSACTION: {

                  PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();

                  pageTransactionInfo.decode(buff);

                  if (record.isUpdate) {
                     // Update of an existing page transaction — reapply the delta
                     PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
                     pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
                  }
                  else {
                     // New page transaction — register it uncommitted and finish it
                     // when/if the enclosing prepared tx commits
                     pageTransactionInfo.setCommitted(false);

                     tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);

                     pagingManager.addTransaction(pageTransactionInfo);

                     tx.addOperation(new FinishPageMessageOperation());
                  }

                  break;
               }
               case SET_SCHEDULED_DELIVERY_TIME: {
                  // Do nothing - for prepared txs, the set scheduled delivery time will only occur in a send in which
                  // case the message will already have the header for the scheduled delivery time, so no need to do
                  // anything.

                  break;
               }
               case DUPLICATE_ID: {
                  // We need load the duplicate ids at prepare time too
                  DuplicateIDEncoding encoding = new DuplicateIDEncoding();

                  encoding.decode(buff);

                  DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);

                  cache.load(tx, encoding.duplID);

                  break;
               }
               case ACKNOWLEDGE_CURSOR: {
                  CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
                  encoding.decode(buff);

                  encoding.position.setRecordID(record.id);

                  PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);

                  if (sub != null) {
                     sub.reloadPreparedACK(tx, encoding.position);
                     referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
                  }
                  else {
                     ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
                  }
                  break;
               }
               case PAGE_CURSOR_COUNTER_VALUE: {
                  // Absolute counter values are not expected inside a prepared tx
                  ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();

                  break;
               }

               case PAGE_CURSOR_COUNTER_INC: {
                  PageCountRecordInc encoding = new PageCountRecordInc();

                  encoding.decode(buff);

                  PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);

                  if (sub != null) {
                     sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue());
                     sub.notEmpty();
                  }
                  else {
                     ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
                  }

                  break;
               }

               default: {
                  // Unknown types are logged and skipped, not fatal
                  ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
               }
            }
         }

         // Reconcile records this prepared tx deleted (currently only
         // large-message pending confirmations carry a payload here)
         for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
            byte[] data = recordDeleted.data;

            if (data.length > 0) {
               ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
               byte b = buff.readByte();

               switch (b) {
                  case ADD_LARGE_MESSAGE_PENDING: {
                     long messageID = buff.readLong();
                     if (!pendingLargeMessages.remove(new Pair(recordDeleted.id, messageID))) {
                        ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
                     }
                     installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
                     break;
                  }
                  default:
                     ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
               }
            }

         }

         journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
      }
   }
+
+ OperationContext getContext(final boolean sync) {
+ if (sync) {
+ return getContext();
+ }
+ else {
+ return DummyOperationContext.getInstance();
+ }
+ }
+
+ // Inner Classes
+ // ----------------------------------------------------------------------------
+
+ private static final class DummyOperationContext implements OperationContext {
+
+ private static DummyOperationContext instance = new DummyOperationContext();
+
+ public static OperationContext getInstance() {
+ return DummyOperationContext.instance;
+ }
+
+ public void executeOnCompletion(final IOCallback runnable) {
+ // There are no executeOnCompletion calls while using the DummyOperationContext
+ // However we keep the code here for correctness
+ runnable.done();
+ }
+
+ public void replicationDone() {
+ }
+
+ public void replicationLineUp() {
+ }
+
+ public void storeLineUp() {
+ }
+
+ public void done() {
+ }
+
+ public void onError(final int errorCode, final String errorMessage) {
+ }
+
+ public void waitCompletion() {
+ }
+
+ public boolean waitCompletion(final long timeout) {
+ return true;
+ }
+
+ public void pageSyncLineUp() {
+ }
+
+ public void pageSyncDone() {
+ }
+ }
+
+ /**
+ * @param id
+ * @param buffer
+ * @return
+ */
+ protected static PersistedRoles newSecurityRecord(long id, ActiveMQBuffer buffer) {
+ PersistedRoles roles = new PersistedRoles();
+ roles.decode(buffer);
+ roles.setStoreId(id);
+ return roles;
+ }
+
+ /**
+ * @param id
+ * @param buffer
+ * @return
+ */
+ static PersistedAddressSetting newAddressEncoding(long id, ActiveMQBuffer buffer) {
+ PersistedAddressSetting setting = new PersistedAddressSetting();
+ setting.decode(buffer);
+ setting.setStoreId(id);
+ return setting;
+ }
+
+ /**
+ * @param id
+ * @param buffer
+ * @return
+ */
+ static GroupingEncoding newGroupEncoding(long id, ActiveMQBuffer buffer) {
+ GroupingEncoding encoding = new GroupingEncoding();
+ encoding.decode(buffer);
+ encoding.setId(id);
+ return encoding;
+ }
+
+ /**
+ * @param id
+ * @param buffer
+ * @return
+ */
+ protected static PersistentQueueBindingEncoding newBindingEncoding(long id, ActiveMQBuffer buffer) {
+ PersistentQueueBindingEncoding bindingEncoding = new PersistentQueueBindingEncoding();
+
+ bindingEncoding.decode(buffer);
+
+ bindingEncoding.setId(id);
+ return bindingEncoding;
+ }
+
   /**
    * Attempts to page {@code msg}, handing the storage-manager read lock to
    * the paging store (see note below on why the lock is exposed).
    *
    * @return the result of {@link PagingStore#page}
    */
   @Override
   public boolean addToPage(PagingStore store,
                            ServerMessage msg,
                            Transaction tx,
                            RouteContextList listCtx) throws Exception {
      /**
       * Exposing the read-lock here is an encapsulation violation done in order to keep the code
       * simpler. The alternative would be to add a second method, say 'verifyPaging', to
       * PagingStore.
       *
       * Adding this second method would also be more surprise prone as it would require a certain
       * calling order.
       *
       * The reasoning is that exposing the lock is more explicit and therefore `less bad`.
       */
      return store.page(msg, tx, listCtx, storageManagerLock.readLock());
   }
+
+ private void installLargeMessageConfirmationOnTX(Transaction tx, long recordID) {
+ TXLargeMessageConfirmationOperation txoper = (TXLargeMessageConfirmationOperation) tx.getProperty(TransactionPropertyIndexes.LARGE_MESSAGE_CONFIRMATIONS);
+ if (txoper == null) {
+ txoper = new TXLargeMessageConfirmationOperation(this);
+ tx.putProperty(TransactionPropertyIndexes.LARGE_MESSAGE_CONFIRMATIONS, txoper);
+ }
+ txoper.confirmedMessages.add(recordID);
+ }
+}
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AddMessageRecord.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AddMessageRecord.java
index fdae48350a..3ca38e3434 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AddMessageRecord.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/AddMessageRecord.java
@@ -26,11 +26,9 @@ public final class AddMessageRecord {
final ServerMessage message;
- // mtaylor (Added to compile)
- public long scheduledDeliveryTime;
+ private long scheduledDeliveryTime;
- // mtaylor (Added to compile)
- public int deliveryCount;
+ private int deliveryCount;
public ServerMessage getMessage() {
return message;
@@ -44,4 +42,11 @@ public final class AddMessageRecord {
return deliveryCount;
}
+ public void setScheduledDeliveryTime(long scheduledDeliveryTime) {
+ this.scheduledDeliveryTime = scheduledDeliveryTime;
+ }
+
+ public void setDeliveryCount(int deliveryCount) {
+ this.deliveryCount = deliveryCount;
+ }
}
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JDBCJournalStorageManager.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JDBCJournalStorageManager.java
new file mode 100644
index 0000000000..4616e78d61
--- /dev/null
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JDBCJournalStorageManager.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.activemq.artemis.core.persistence.impl.journal;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.activemq.artemis.core.config.Configuration;
+import org.apache.activemq.artemis.core.config.storage.DatabaseStorageConfiguration;
+import org.apache.activemq.artemis.core.io.IOCriticalErrorListener;
+import org.apache.activemq.artemis.core.journal.Journal;
+import org.apache.activemq.artemis.jdbc.store.journal.JDBCJournalImpl;
+import org.apache.activemq.artemis.utils.ExecutorFactory;
+
/**
 * A {@link JournalStorageManager} variant that stores both the bindings and
 * the message journal in a relational database via {@link JDBCJournalImpl},
 * using the JDBC URL and table names from the broker's
 * {@link DatabaseStorageConfiguration}.
 */
public class JDBCJournalStorageManager extends JournalStorageManager {

   public JDBCJournalStorageManager(Configuration config, ExecutorFactory executorFactory) {
      super(config, executorFactory);
   }

   public JDBCJournalStorageManager(final Configuration config,
                                    final ExecutorFactory executorFactory,
                                    final IOCriticalErrorListener criticalErrorListener) {
      super(config, executorFactory, criticalErrorListener);
   }

   // Creates the two JDBC-backed journals in place of the file-based ones.
   @Override
   protected void init(Configuration config, IOCriticalErrorListener criticalErrorListener) {
      // NOTE(review): assumes the store configuration is a DatabaseStorageConfiguration;
      // a ClassCastException here would mean the broker was configured with a file store.
      DatabaseStorageConfiguration dbConf = (DatabaseStorageConfiguration) config.getStoreConfiguration();

      Journal localBindings = new JDBCJournalImpl(dbConf.getJdbcConnectionUrl(), dbConf.getBindingsTableName());
      bindingsJournal = localBindings;

      Journal localMessage = new JDBCJournalImpl(dbConf.getJdbcConnectionUrl(), dbConf.getMessageTableName());
      messageJournal = localMessage;
   }

   // Mirrors the superclass shutdown, differing only in how the bindings
   // journal is stopped (JDBC-specific stop(boolean) overload).
   @Override
   public synchronized void stop(boolean ioCriticalError) throws Exception {
      if (!started) {
         return;
      }

      if (!ioCriticalError) {
         performCachedLargeMessageDeletes();
         // Must call close to make sure last id is persisted
         if (journalLoaded && idGenerator != null)
            idGenerator.persistCurrentID();
      }

      // Drain previously submitted executor work before stopping the journals
      final CountDownLatch latch = new CountDownLatch(1);
      executor.execute(new Runnable() {
         @Override
         public void run() {
            latch.countDown();
         }
      });

      latch.await(30, TimeUnit.SECONDS);

      beforeStop();

      // NOTE(review): bindings journal is stopped via the JDBC-specific
      // stop(false) while the message journal uses plain stop() — confirm this
      // asymmetry is intentional.
      ((JDBCJournalImpl) bindingsJournal).stop(false);

      messageJournal.stop();

      singleThreadExecutor.shutdown();

      journalLoaded = false;

      started = false;
   }

}
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalRecordIds.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalRecordIds.java
index 0242b5026b..4aa470b11b 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalRecordIds.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalRecordIds.java
@@ -27,7 +27,6 @@ public final class JournalRecordIds {
// grouping journal record type
- // mtaylor Added to compile
public static final byte GROUP_RECORD = 20;
// BindingsImpl journal record type
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalStorageManager.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalStorageManager.java
index c272a12bf5..c1ef0d32be 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalStorageManager.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/persistence/impl/journal/JournalStorageManager.java
@@ -14,38 +14,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package org.apache.activemq.artemis.core.persistence.impl.journal;
-import javax.transaction.xa.Xid;
import java.io.File;
-import java.io.FileInputStream;
import java.nio.ByteBuffer;
-import java.security.AccessController;
-import java.security.DigestInputStream;
-import java.security.InvalidParameterException;
-import java.security.MessageDigest;
-import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
-import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
-import org.apache.activemq.artemis.api.core.ActiveMQBuffers;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.ActiveMQIllegalStateException;
import org.apache.activemq.artemis.api.core.ActiveMQInternalErrorException;
@@ -53,172 +37,45 @@ import org.apache.activemq.artemis.api.core.Message;
import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.config.Configuration;
-import org.apache.activemq.artemis.core.filter.Filter;
-import org.apache.activemq.artemis.core.io.IOCallback;
import org.apache.activemq.artemis.core.io.IOCriticalErrorListener;
import org.apache.activemq.artemis.core.io.SequentialFile;
import org.apache.activemq.artemis.core.io.SequentialFileFactory;
import org.apache.activemq.artemis.core.io.aio.AIOSequentialFileFactory;
import org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.journal.Journal;
-import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
-import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
-import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.impl.JournalFile;
import org.apache.activemq.artemis.core.journal.impl.JournalImpl;
import org.apache.activemq.artemis.core.message.impl.MessageInternal;
-import org.apache.activemq.artemis.core.paging.PageTransactionInfo;
import org.apache.activemq.artemis.core.paging.PagedMessage;
import org.apache.activemq.artemis.core.paging.PagingManager;
import org.apache.activemq.artemis.core.paging.PagingStore;
-import org.apache.activemq.artemis.core.paging.cursor.PagePosition;
-import org.apache.activemq.artemis.core.paging.cursor.PageSubscription;
-import org.apache.activemq.artemis.core.paging.cursor.PagedReferenceImpl;
-import org.apache.activemq.artemis.core.paging.impl.PageTransactionInfoImpl;
-import org.apache.activemq.artemis.core.persistence.GroupingInfo;
import org.apache.activemq.artemis.core.persistence.OperationContext;
-import org.apache.activemq.artemis.core.persistence.QueueBindingInfo;
-import org.apache.activemq.artemis.core.persistence.StorageManager;
-import org.apache.activemq.artemis.core.persistence.config.PersistedAddressSetting;
-import org.apache.activemq.artemis.core.persistence.config.PersistedRoles;
-import org.apache.activemq.artemis.core.persistence.impl.PageCountPending;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.CursorAckRecordEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DeleteEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DeliveryCountUpdateEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.DuplicateIDEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.FinishPageMessageOperation;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.GroupingEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.HeuristicCompletionEncoding;
import org.apache.activemq.artemis.core.persistence.impl.journal.codec.LargeMessageEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountPendingImpl;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecord;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageUpdateTXEncoding;
import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PendingLargeMessageEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.PersistentQueueBindingEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.RefEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.ScheduledDeliveryEncoding;
-import org.apache.activemq.artemis.core.persistence.impl.journal.codec.XidEncoding;
-import org.apache.activemq.artemis.core.postoffice.Binding;
-import org.apache.activemq.artemis.core.postoffice.DuplicateIDCache;
-import org.apache.activemq.artemis.core.postoffice.PostOffice;
-import org.apache.activemq.artemis.core.protocol.core.impl.wireformat.ReplicationLiveIsStoppingMessage.LiveStopping;
+import org.apache.activemq.artemis.core.protocol.core.impl.wireformat.ReplicationLiveIsStoppingMessage;
import org.apache.activemq.artemis.core.replication.ReplicatedJournal;
import org.apache.activemq.artemis.core.replication.ReplicationManager;
import org.apache.activemq.artemis.core.server.ActiveMQMessageBundle;
import org.apache.activemq.artemis.core.server.ActiveMQServerLogger;
import org.apache.activemq.artemis.core.server.JournalType;
import org.apache.activemq.artemis.core.server.LargeServerMessage;
-import org.apache.activemq.artemis.core.server.MessageReference;
-import org.apache.activemq.artemis.core.server.Queue;
-import org.apache.activemq.artemis.core.server.RouteContextList;
import org.apache.activemq.artemis.core.server.ServerMessage;
-import org.apache.activemq.artemis.core.server.group.impl.GroupBinding;
-import org.apache.activemq.artemis.core.server.impl.JournalLoader;
-import org.apache.activemq.artemis.core.server.impl.ServerMessageImpl;
-import org.apache.activemq.artemis.core.transaction.ResourceManager;
-import org.apache.activemq.artemis.core.transaction.Transaction;
-import org.apache.activemq.artemis.core.transaction.TransactionPropertyIndexes;
-import org.apache.activemq.artemis.core.transaction.impl.TransactionImpl;
-import org.apache.activemq.artemis.utils.ActiveMQThreadFactory;
-import org.apache.activemq.artemis.utils.Base64;
import org.apache.activemq.artemis.utils.ExecutorFactory;
-import org.apache.activemq.artemis.utils.IDGenerator;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.ACKNOWLEDGE_CURSOR;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.ADD_LARGE_MESSAGE_PENDING;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.DUPLICATE_ID;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.PAGE_CURSOR_COUNTER_INC;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE;
-import static org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME;
+public class JournalStorageManager extends AbstractJournalStorageManager {
-/**
- * Controls access to the journals and other storage files such as the ones used to store pages and
- * large messages. This class must control writing of any non-transient data, as it is the key point
- * for synchronizing a replicating backup server.
- *
- * Using this class also ensures that locks are acquired in the right order, avoiding dead-locks.
- *
- * Notice that, turning on and off replication (on the live server side) is _mostly_ a matter of
- * using {@link ReplicatedJournal}s instead of regular {@link JournalImpl}, and sync the existing
- * data. For details see the Javadoc of
- * {@link #startReplication(ReplicationManager, PagingManager, String, boolean)}.
- *
- */
-public class JournalStorageManager implements StorageManager {
+ private SequentialFileFactory journalFF;
- public enum JournalContent {
- BINDINGS((byte) 0), MESSAGES((byte) 1);
+ private SequentialFileFactory largeMessagesFactory;
- public final byte typeByte;
+ private Journal originalMessageJournal;
- JournalContent(byte b) {
- typeByte = b;
- }
+ private Journal originalBindingsJournal;
- public static JournalContent getType(byte type) {
- if (MESSAGES.typeByte == type)
- return MESSAGES;
- if (BINDINGS.typeByte == type)
- return BINDINGS;
- throw new InvalidParameterException("invalid byte: " + type);
- }
- }
-
- private static final long CHECKPOINT_BATCH_SIZE = Integer.MAX_VALUE;
-
- private final Semaphore pageMaxConcurrentIO;
-
- private final BatchingIDGenerator idGenerator;
-
- private final ReentrantReadWriteLock storageManagerLock = new ReentrantReadWriteLock(true);
+ private String largeMessagesDirectory;
private ReplicationManager replicator;
- private final SequentialFileFactory journalFF;
-
- private Journal messageJournal;
-
- private Journal bindingsJournal;
-
- private final Journal originalMessageJournal;
-
- private final Journal originalBindingsJournal;
-
- private final SequentialFileFactory largeMessagesFactory;
-
- private volatile boolean started;
-
- /**
- * Used to create Operation Contexts
- */
- private final ExecutorFactory executorFactory;
-
- private final Executor executor;
-
- private ExecutorService singleThreadExecutor;
-
- private final boolean syncTransactional;
-
- private final boolean syncNonTransactional;
-
- private final int perfBlastPages;
-
- private final String largeMessagesDirectory;
-
- private boolean journalLoaded = false;
-
- private final IOCriticalErrorListener ioCriticalErrorListener;
-
- private final Configuration config;
-
- // Persisted core configuration
- private final Map<SimpleString, PersistedRoles> mapPersistedRoles = new ConcurrentHashMap<>();
-
- private final Map<SimpleString, PersistedAddressSetting> mapPersistedAddressSettings = new ConcurrentHashMap<>();
-
- private final Set<Long> largeMessagesToDelete = new HashSet<>();
-
public JournalStorageManager(final Configuration config, final ExecutorFactory executorFactory) {
this(config, executorFactory, null);
}
@@ -226,13 +83,11 @@ public class JournalStorageManager implements StorageManager {
public JournalStorageManager(final Configuration config,
final ExecutorFactory executorFactory,
final IOCriticalErrorListener criticalErrorListener) {
- this.executorFactory = executorFactory;
+ super(config, executorFactory, criticalErrorListener);
+ }
- this.ioCriticalErrorListener = criticalErrorListener;
-
- this.config = config;
-
- executor = executorFactory.getExecutor();
+ @Override
+ protected void init(Configuration config, IOCriticalErrorListener criticalErrorListener) {
if (config.getJournalType() != JournalType.NIO && config.getJournalType() != JournalType.ASYNCIO) {
throw ActiveMQMessageBundle.BUNDLE.invalidJournal();
@@ -245,10 +100,6 @@ public class JournalStorageManager implements StorageManager {
bindingsJournal = localBindings;
originalBindingsJournal = localBindings;
- syncNonTransactional = config.isJournalSyncNonTransactional();
-
- syncTransactional = config.isJournalSyncTransactional();
-
if (config.getJournalType() == JournalType.ASYNCIO) {
ActiveMQServerLogger.LOGGER.journalUseAIO();
@@ -262,10 +113,7 @@ public class JournalStorageManager implements StorageManager {
throw ActiveMQMessageBundle.BUNDLE.invalidJournalType2(config.getJournalType());
}
- idGenerator = new BatchingIDGenerator(0, JournalStorageManager.CHECKPOINT_BATCH_SIZE, this);
-
Journal localMessage = new JournalImpl(config.getJournalFileSize(), config.getJournalMinFiles(), config.getJournalPoolFiles(), config.getJournalCompactMinFiles(), config.getJournalCompactPercentage(), journalFF, "activemq-data", "amq", config.getJournalType() == JournalType.ASYNCIO ? config.getJournalMaxIO_AIO() : config.getJournalMaxIO_NIO());
-
messageJournal = localMessage;
originalMessageJournal = localMessage;
@@ -283,39 +131,353 @@ public class JournalStorageManager implements StorageManager {
}
}
+ // Life Cycle Handlers
@Override
- public void criticalError(Throwable error) {
- ioCriticalErrorListener.onIOException(error, error.getMessage(), null);
+ protected void beforeStart() throws Exception {
+ checkAndCreateDir(config.getBindingsLocation(), config.isCreateBindingsDir());
+ checkAndCreateDir(config.getJournalLocation(), config.isCreateJournalDir());
+ checkAndCreateDir(config.getLargeMessagesLocation(), config.isCreateJournalDir());
+ cleanupIncompleteFiles();
}
@Override
- public void clearContext() {
- OperationContextImpl.clearContext();
+ protected void beforeStop() throws Exception {
+ if (replicator != null) {
+ replicator.stop();
+ }
+ }
+
+ @Override
+ public void stop() throws Exception {
+ stop(false);
}
public boolean isReplicated() {
return replicator != null;
}
+ private void cleanupIncompleteFiles() throws Exception {
+ if (largeMessagesFactory != null) {
+ List<String> tmpFiles = largeMessagesFactory.listFiles("tmp");
+ for (String tmpFile : tmpFiles) {
+ SequentialFile file = largeMessagesFactory.createSequentialFile(tmpFile);
+ file.delete();
+ }
+ }
+ }
+
+ @Override
+ public synchronized void stop(boolean ioCriticalError) throws Exception {
+ if (!started) {
+ return;
+ }
+
+ if (!ioCriticalError) {
+ performCachedLargeMessageDeletes();
+ // Must call close to make sure last id is persisted
+ if (journalLoaded && idGenerator != null)
+ idGenerator.persistCurrentID();
+ }
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ latch.countDown();
+ }
+ });
+
+ latch.await(30, TimeUnit.SECONDS);
+
+ // We cache the variable as the replicator could be changed between here and the time we call stop
+ // since sendLiveIsStopping may issue a close back from the channel
+ // and we want to ensure a stop here just in case
+ ReplicationManager replicatorInUse = replicator;
+ if (replicatorInUse != null) {
+ final OperationContext token = replicator.sendLiveIsStopping(ReplicationLiveIsStoppingMessage.LiveStopping.FAIL_OVER);
+ if (token != null) {
+ try {
+ token.waitCompletion(5000);
+ }
+ catch (Exception e) {
+ // ignore it
+ }
+ }
+ replicatorInUse.stop();
+ }
+ bindingsJournal.stop();
+
+ messageJournal.stop();
+
+ singleThreadExecutor.shutdown();
+
+ journalLoaded = false;
+
+ started = false;
+ }
+
/**
- * Starts replication at the live-server side.
- *
- * In practice that means 2 things:
- * (1) all currently existing data must be sent to the backup.
- * (2) every new persistent information is replicated (sent) to the backup.
- *
- * To achieve (1), we lock the entire journal while collecting the list of files to send to the
- * backup. The journal does not remain locked during actual synchronization.
- *
- * To achieve (2), instead of writing directly to instances of {@link JournalImpl}, we write to
- * instances of {@link ReplicatedJournal}.
- *
- * At the backup-side replication is handled by {@link org.apache.activemq.artemis.core.replication.ReplicationEndpoint}.
- *
- * @param replicationManager
- * @param pagingManager
- * @throws ActiveMQException
+ * Assumption is that this is only called with a writeLock on the StorageManager.
*/
+ @Override
+ protected void performCachedLargeMessageDeletes() {
+ for (Long largeMsgId : largeMessagesToDelete) {
+ SequentialFile msg = createFileForLargeMessage(largeMsgId, LargeMessageExtension.DURABLE);
+ try {
+ msg.delete();
+ }
+ catch (Exception e) {
+ ActiveMQServerLogger.LOGGER.journalErrorDeletingMessage(e, largeMsgId);
+ }
+ if (replicator != null) {
+ replicator.largeMessageDelete(largeMsgId);
+ }
+ }
+ largeMessagesToDelete.clear();
+ }
+
+ protected SequentialFile createFileForLargeMessage(final long messageID, final boolean durable) {
+ if (durable) {
+ return createFileForLargeMessage(messageID, LargeMessageExtension.DURABLE);
+ }
+ else {
+ return createFileForLargeMessage(messageID, LargeMessageExtension.TEMPORARY);
+ }
+ }
+
+ @Override
+ /**
+ * @param messages
+ * @param buff
+ * @return
+ * @throws Exception
+ */
+ protected LargeServerMessage parseLargeMessage(final Map<Long, ServerMessage> messages,
+ final ActiveMQBuffer buff) throws Exception {
+ LargeServerMessage largeMessage = createLargeMessage();
+
+ LargeMessageEncoding messageEncoding = new LargeMessageEncoding(largeMessage);
+
+ messageEncoding.decode(buff);
+
+ if (largeMessage.containsProperty(Message.HDR_ORIG_MESSAGE_ID)) {
+ // for compatibility: couple with old behaviour, copying the old file to avoid message loss
+ long originalMessageID = largeMessage.getLongProperty(Message.HDR_ORIG_MESSAGE_ID);
+
+ SequentialFile currentFile = createFileForLargeMessage(largeMessage.getMessageID(), true);
+
+ if (!currentFile.exists()) {
+ SequentialFile linkedFile = createFileForLargeMessage(originalMessageID, true);
+ if (linkedFile.exists()) {
+ linkedFile.copyTo(currentFile);
+ linkedFile.close();
+ }
+ }
+
+ currentFile.close();
+ }
+
+ return largeMessage;
+ }
+
+ @Override
+ public void pageClosed(final SimpleString storeName, final int pageNumber) {
+ if (isReplicated()) {
+ readLock();
+ try {
+ if (isReplicated())
+ replicator.pageClosed(storeName, pageNumber);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+ }
+
+ @Override
+ public void pageDeleted(final SimpleString storeName, final int pageNumber) {
+ if (isReplicated()) {
+ readLock();
+ try {
+ if (isReplicated())
+ replicator.pageDeleted(storeName, pageNumber);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+ }
+
+ @Override
+ public void pageWrite(final PagedMessage message, final int pageNumber) {
+ if (isReplicated()) {
+ // Note: (https://issues.jboss.org/browse/HORNETQ-1059)
+ // We have to replicate durable and non-durable messages on paging
+ // since acknowledgments are written using the page-position.
+ // Say you are sending durable and non-durable messages to a page
+ // The ACKs would be done to wrong positions, and the backup would be a mess
+
+ readLock();
+ try {
+ if (isReplicated())
+ replicator.pageWrite(message, pageNumber);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+ }
+
+ @Override
+ public ByteBuffer allocateDirectBuffer(int size) {
+ return journalFF.allocateDirectBuffer(size);
+ }
+
+ @Override
+ public void freeDirectBuffer(ByteBuffer buffer) {
+ journalFF.releaseBuffer(buffer);
+ }
+
+ public long storePendingLargeMessage(final long messageID) throws Exception {
+ readLock();
+ try {
+ long recordID = generateID();
+
+ messageJournal.appendAddRecord(recordID, JournalRecordIds.ADD_LARGE_MESSAGE_PENDING, new PendingLargeMessageEncoding(messageID), true, getContext(true));
+
+ return recordID;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ // This should be accessed from this package only
+ void deleteLargeMessageFile(final LargeServerMessage largeServerMessage) throws ActiveMQException {
+ if (largeServerMessage.getPendingRecordID() < 0) {
+ try {
+ // The delete file happens asynchronously
+ // And the client won't be waiting for the actual file to be deleted.
+ // We set a temporary record (short lived) on the journal
+ // to avoid a situation where the server is restarted and pending large message stays on forever
+ largeServerMessage.setPendingRecordID(storePendingLargeMessage(largeServerMessage.getMessageID()));
+ }
+ catch (Exception e) {
+ throw new ActiveMQInternalErrorException(e.getMessage(), e);
+ }
+ }
+ final SequentialFile file = largeServerMessage.getFile();
+ if (file == null) {
+ return;
+ }
+
+ if (largeServerMessage.isDurable() && isReplicated()) {
+ readLock();
+ try {
+ if (isReplicated() && replicator.isSynchronizing()) {
+ synchronized (largeMessagesToDelete) {
+ largeMessagesToDelete.add(Long.valueOf(largeServerMessage.getMessageID()));
+ confirmLargeMessage(largeServerMessage);
+ }
+ return;
+ }
+ }
+ finally {
+ readUnLock();
+ }
+ }
+ Runnable deleteAction = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ readLock();
+ try {
+ if (replicator != null) {
+ replicator.largeMessageDelete(largeServerMessage.getMessageID());
+ }
+ file.delete();
+
+ // The confirm could only be done after the actual delete is done
+ confirmLargeMessage(largeServerMessage);
+ }
+ finally {
+ readUnLock();
+ }
+ }
+ catch (Exception e) {
+ ActiveMQServerLogger.LOGGER.journalErrorDeletingMessage(e, largeServerMessage.getMessageID());
+ }
+ }
+
+ };
+
+ if (executor == null) {
+ deleteAction.run();
+ }
+ else {
+ executor.execute(deleteAction);
+ }
+ }
+
+ @Override
+ public LargeServerMessage createLargeMessage() {
+ return new LargeServerMessageImpl(this);
+ }
+
+ @Override
+ public LargeServerMessage createLargeMessage(final long id, final MessageInternal message) throws Exception {
+ readLock();
+ try {
+ if (isReplicated()) {
+ replicator.largeMessageBegin(id);
+ }
+
+ LargeServerMessageImpl largeMessage = (LargeServerMessageImpl) createLargeMessage();
+
+ largeMessage.copyHeadersAndProperties(message);
+
+ largeMessage.setMessageID(id);
+
+ if (largeMessage.isDurable()) {
+ // We store a marker on the journal that the large file is pending
+ long pendingRecordID = storePendingLargeMessage(id);
+
+ largeMessage.setPendingRecordID(pendingRecordID);
+ }
+
+ return largeMessage;
+ }
+ finally {
+ readUnLock();
+ }
+ }
+
+ @Override
+ public SequentialFile createFileForLargeMessage(final long messageID, LargeMessageExtension extension) {
+ return largeMessagesFactory.createSequentialFile(messageID + extension.getExtension());
+ }
+
+ /**
+ * Send an entire journal file to a replicating backup server.
+ */
+ private void sendJournalFile(JournalFile[] journalFiles, JournalContent type) throws Exception {
+ for (JournalFile jf : journalFiles) {
+ if (!started)
+ return;
+ replicator.syncJournalFile(jf, type);
+ }
+ }
+
+ private JournalFile[] prepareJournalForCopy(Journal journal,
+ JournalContent contentType,
+ String nodeID,
+ boolean autoFailBack) throws Exception {
+ journal.forceMoveNextFile();
+ JournalFile[] datafiles = journal.getDataFiles();
+ replicator.sendStartSyncMessage(datafiles, contentType, nodeID, autoFailBack);
+ return datafiles;
+ }
+
@Override
public void startReplication(ReplicationManager replicationManager,
PagingManager pagingManager,
@@ -412,88 +574,17 @@ public class JournalStorageManager implements StorageManager {
}
}
- public static String md5(File file) {
- try {
- byte[] buffer = new byte[1 << 4];
- MessageDigest md = MessageDigest.getInstance("MD5");
-
- FileInputStream is = new FileInputStream(file);
- DigestInputStream is2 = new DigestInputStream(is, md);
- while (is2.read(buffer) > 0) {
+ private void sendLargeMessageFiles(final Map<Long, Pair<String, Long>> pendingLargeMessages) throws Exception {
+ Iterator<Map.Entry<Long, Pair<String, Long>>> iter = pendingLargeMessages.entrySet().iterator();
+ while (started && iter.hasNext()) {
+ Map.Entry<Long, Pair<String, Long>> entry = iter.next();
+ String fileName = entry.getValue().getA();
+ final long id = entry.getKey();
+ long size = entry.getValue().getB();
+ SequentialFile seqFile = largeMessagesFactory.createSequentialFile(fileName);
+ if (!seqFile.exists())
continue;
- }
- byte[] digest = md.digest();
- is.close();
- is2.close();
- return Base64.encodeBytes(digest);
- }
- catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- /**
- * Stops replication by resetting replication-related fields to their 'unreplicated' state.
- */
- @Override
- public void stopReplication() {
- storageManagerLock.writeLock().lock();
- try {
- if (replicator == null)
- return;
- bindingsJournal = originalBindingsJournal;
- messageJournal = originalMessageJournal;
- try {
- replicator.stop();
- }
- catch (Exception e) {
- ActiveMQServerLogger.LOGGER.errorStoppingReplicationManager(e);
- }
- replicator = null;
- // delete inside the writeLock. Avoids a lot of state checking and races with
- // startReplication.
- // This method should not be called under normal circumstances
- performCachedLargeMessageDeletes();
- }
- finally {
- storageManagerLock.writeLock().unlock();
- }
- }
-
- /**
- * Assumption is that this is only called with a writeLock on the StorageManager.
- */
- private void performCachedLargeMessageDeletes() {
- for (Long largeMsgId : largeMessagesToDelete) {
- SequentialFile msg = createFileForLargeMessage(largeMsgId, LargeMessageExtension.DURABLE);
- try {
- msg.delete();
- }
- catch (Exception e) {
- ActiveMQServerLogger.LOGGER.journalErrorDeletingMessage(e, largeMsgId);
- }
- if (replicator != null) {
- replicator.largeMessageDelete(largeMsgId);
- }
- }
- largeMessagesToDelete.clear();
- }
-
- public IDGenerator getIDGenerator() {
- return idGenerator;
- }
-
- /**
- * @param pageFilesToSync
- * @throws Exception
- */
- private void sendPagesToBackup(Map<SimpleString, Collection<Integer>> pageFilesToSync,
- PagingManager manager) throws Exception {
- for (Entry<SimpleString, Collection<Integer>> entry : pageFilesToSync.entrySet()) {
- if (!started)
- return;
- PagingStore store = manager.getPageStore(entry.getKey());
- store.sendPages(replicator, entry.getValue());
+ replicator.syncLargeMessageFile(seqFile, size, id);
}
}
@@ -512,22 +603,18 @@ public class JournalStorageManager implements StorageManager {
return info;
}
- private void sendLargeMessageFiles(final Map<Long, Pair<String, Long>> pendingLargeMessages) throws Exception {
- Iterator<Map.Entry<Long, Pair<String, Long>>> iter = pendingLargeMessages.entrySet().iterator();
- while (started && iter.hasNext()) {
- Map.Entry<Long, Pair<String, Long>> entry = iter.next();
- String fileName = entry.getValue().getA();
- final long id = entry.getKey();
- long size = entry.getValue().getB();
- SequentialFile seqFile = largeMessagesFactory.createSequentialFile(fileName);
- if (!seqFile.exists())
- continue;
- replicator.syncLargeMessageFile(seqFile, size, id);
- }
- }
- private long getLargeMessageIdFromFilename(String filename) {
- return Long.parseLong(filename.split("\\.")[0]);
+ private void checkAndCreateDir(final File dir, final boolean create) {
+ if (!dir.exists()) {
+ if (create) {
+ if (!dir.mkdirs()) {
+ throw new IllegalStateException("Failed to create directory " + dir);
+ }
+ }
+ else {
+ throw ActiveMQMessageBundle.BUNDLE.cannotCreateDir(dir.getAbsolutePath());
+ }
+ }
}
/**
@@ -561,136 +648,51 @@ public class JournalStorageManager implements StorageManager {
}
/**
- * Send an entire journal file to a replicating backup server.
+ * @param pageFilesToSync
+ * @throws Exception
*/
- private void sendJournalFile(JournalFile[] journalFiles, JournalContent type) throws Exception {
- for (JournalFile jf : journalFiles) {
+ private void sendPagesToBackup(Map<SimpleString, Collection<Integer>> pageFilesToSync,
+ PagingManager manager) throws Exception {
+ for (Map.Entry<SimpleString, Collection<Integer>> entry : pageFilesToSync.entrySet()) {
if (!started)
return;
- replicator.syncJournalFile(jf, type);
+ PagingStore store = manager.getPageStore(entry.getKey());
+ store.sendPages(replicator, entry.getValue());
}
}
- private JournalFile[] prepareJournalForCopy(Journal journal,
- JournalContent contentType,
- String nodeID,
- boolean autoFailBack) throws Exception {
- journal.forceMoveNextFile();
- JournalFile[] datafiles = journal.getDataFiles();
- replicator.sendStartSyncMessage(datafiles, contentType, nodeID, autoFailBack);
- return datafiles;
+ private long getLargeMessageIdFromFilename(String filename) {
+ return Long.parseLong(filename.split("\\.")[0]);
}
+ /**
+ * Stops replication by resetting replication-related fields to their 'unreplicated' state.
+ */
@Override
- public final void waitOnOperations() throws Exception {
- if (!started) {
- ActiveMQServerLogger.LOGGER.serverIsStopped();
- throw new IllegalStateException("Server is stopped");
- }
- waitOnOperations(0);
- }
-
- @Override
- public final boolean waitOnOperations(final long timeout) throws Exception {
- if (!started) {
- ActiveMQServerLogger.LOGGER.serverIsStopped();
- throw new IllegalStateException("Server is stopped");
- }
- return getContext().waitCompletion(timeout);
- }
-
- @Override
- public void pageClosed(final SimpleString storeName, final int pageNumber) {
- if (isReplicated()) {
- readLock();
+ public void stopReplication() {
+ storageManagerLock.writeLock().lock();
+ try {
+ if (replicator == null)
+ return;
+ bindingsJournal = originalBindingsJournal;
+ messageJournal = originalMessageJournal;
try {
- if (isReplicated())
- replicator.pageClosed(storeName, pageNumber);
+ replicator.stop();
}
- finally {
- readUnLock();
+ catch (Exception e) {
+ ActiveMQServerLogger.LOGGER.errorStoppingReplicationManager(e);
}
+ replicator = null;
+ // delete inside the writeLock. Avoids a lot of state checking and races with
+ // startReplication.
+ // This method should not be called under normal circumstances
+ performCachedLargeMessageDeletes();
}
- }
-
- @Override
- public void pageDeleted(final SimpleString storeName, final int pageNumber) {
- if (isReplicated()) {
- readLock();
- try {
- if (isReplicated())
- replicator.pageDeleted(storeName, pageNumber);
- }
- finally {
- readUnLock();
- }
+ finally {
+ storageManagerLock.writeLock().unlock();
}
}
- @Override
- public void pageWrite(final PagedMessage message, final int pageNumber) {
- if (isReplicated()) {
- // Note: (https://issues.jboss.org/browse/HORNETQ-1059)
- // We have to replicate durable and non-durable messages on paging
- // since acknowledgments are written using the page-position.
- // Say you are sending durable and non-durable messages to a page
- // The ACKs would be done to wrong positions, and the backup would be a mess
-
- readLock();
- try {
- if (isReplicated())
- replicator.pageWrite(message, pageNumber);
- }
- finally {
- readUnLock();
- }
- }
- }
-
- @Override
- public OperationContext getContext() {
- return OperationContextImpl.getContext(executorFactory);
- }
-
- @Override
- public void setContext(final OperationContext context) {
- OperationContextImpl.setContext(context);
- }
-
- public Executor getSingleThreadExecutor() {
- return singleThreadExecutor;
- }
-
- @Override
- public OperationContext newSingleThreadContext() {
- return newContext(singleThreadExecutor);
- }
-
- @Override
- public OperationContext newContext(final Executor executor1) {
- return new OperationContextImpl(executor1);
- }
-
- @Override
- public void afterCompleteOperations(final IOCallback run) {
- getContext().executeOnCompletion(run);
- }
-
- @Override
- public long generateID() {
- return idGenerator.generateID();
- }
-
- @Override
- public long getCurrentID() {
- return idGenerator.getCurrentID();
- }
-
- @Override
- public LargeServerMessage createLargeMessage() {
- return new LargeServerMessageImpl(this);
- }
-
@Override
public final void addBytesToLargeMessage(final SequentialFile file,
final long messageId,
@@ -709,1806 +711,4 @@ public class JournalStorageManager implements StorageManager {
readUnLock();
}
}
-
- @Override
- public LargeServerMessage createLargeMessage(final long id, final MessageInternal message) throws Exception {
- readLock();
- try {
- if (isReplicated()) {
- replicator.largeMessageBegin(id);
- }
-
- LargeServerMessageImpl largeMessage = (LargeServerMessageImpl) createLargeMessage();
-
- largeMessage.copyHeadersAndProperties(message);
-
- largeMessage.setMessageID(id);
-
- if (largeMessage.isDurable()) {
- // We store a marker on the journal that the large file is pending
- long pendingRecordID = storePendingLargeMessage(id);
-
- largeMessage.setPendingRecordID(pendingRecordID);
- }
-
- return largeMessage;
- }
- finally {
- readUnLock();
- }
- }
-
- // Non transactional operations
-
- public long storePendingLargeMessage(final long messageID) throws Exception {
- readLock();
- try {
- long recordID = generateID();
-
- messageJournal.appendAddRecord(recordID, JournalRecordIds.ADD_LARGE_MESSAGE_PENDING, new PendingLargeMessageEncoding(messageID), true, getContext(true));
-
- return recordID;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void confirmPendingLargeMessageTX(final Transaction tx, long messageID, long recordID) throws Exception {
- readLock();
- try {
- installLargeMessageConfirmationOnTX(tx, recordID);
- messageJournal.appendDeleteRecordTransactional(tx.getID(), recordID, new DeleteEncoding(JournalRecordIds.ADD_LARGE_MESSAGE_PENDING, messageID));
- }
- finally {
- readUnLock();
- }
- }
-
- /**
- * We don't need messageID now but we are likely to need it we ever decide to support a database
- */
- @Override
- public void confirmPendingLargeMessage(long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecord(recordID, true, getContext());
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeMessage(final ServerMessage message) throws Exception {
- if (message.getMessageID() <= 0) {
- // Sanity check only... this shouldn't happen unless there is a bug
- throw ActiveMQMessageBundle.BUNDLE.messageIdNotAssigned();
- }
-
- readLock();
- try {
- // Note that we don't sync, the add reference that comes immediately after will sync if
- // appropriate
-
- if (message.isLargeMessage()) {
- messageJournal.appendAddRecord(message.getMessageID(), JournalRecordIds.ADD_LARGE_MESSAGE, new LargeMessageEncoding((LargeServerMessage) message), false, getContext(false));
- }
- else {
- messageJournal.appendAddRecord(message.getMessageID(), JournalRecordIds.ADD_MESSAGE, message, false, getContext(false));
- }
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeReference(final long queueID, final long messageID, final boolean last) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecord(messageID, JournalRecordIds.ADD_REF, new RefEncoding(queueID), last && syncNonTransactional, getContext(last && syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void readLock() {
- storageManagerLock.readLock().lock();
- }
-
- @Override
- public void readUnLock() {
- storageManagerLock.readLock().unlock();
- }
-
- @Override
- public void storeAcknowledge(final long queueID, final long messageID) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecord(messageID, JournalRecordIds.ACKNOWLEDGE_REF, new RefEncoding(queueID), syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeCursorAcknowledge(long queueID, PagePosition position) throws Exception {
- readLock();
- try {
- long ackID = idGenerator.generateID();
- position.setRecordID(ackID);
- messageJournal.appendAddRecord(ackID, JournalRecordIds.ACKNOWLEDGE_CURSOR, new CursorAckRecordEncoding(queueID, position), syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteMessage(final long messageID) throws Exception {
- readLock();
- try {
- // Messages are deleted on postACK, one after another.
- // If these deletes are synchronized, we would build up messages on the Executor
- // increasing chances of losing deletes.
- // The StorageManager should verify messages without references
- messageJournal.appendDeleteRecord(messageID, false, getContext(false));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void updateScheduledDeliveryTime(final MessageReference ref) throws Exception {
- ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue().getID());
- readLock();
- try {
- messageJournal.appendUpdateRecord(ref.getMessage().getMessageID(), JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME, encoding, syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeDuplicateID(final SimpleString address, final byte[] duplID, final long recordID) throws Exception {
- readLock();
- try {
- DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);
-
- messageJournal.appendAddRecord(recordID, JournalRecordIds.DUPLICATE_ID, encoding, syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteDuplicateID(final long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecord(recordID, syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- // Transactional operations
-
- @Override
- public void storeMessageTransactional(final long txID, final ServerMessage message) throws Exception {
- if (message.getMessageID() <= 0) {
- throw ActiveMQMessageBundle.BUNDLE.messageIdNotAssigned();
- }
-
- readLock();
- try {
- if (message.isLargeMessage()) {
- messageJournal.appendAddRecordTransactional(txID, message.getMessageID(), JournalRecordIds.ADD_LARGE_MESSAGE, new LargeMessageEncoding(((LargeServerMessage) message)));
- }
- else {
- messageJournal.appendAddRecordTransactional(txID, message.getMessageID(), JournalRecordIds.ADD_MESSAGE, message);
- }
-
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storePageTransaction(final long txID, final PageTransactionInfo pageTransaction) throws Exception {
- readLock();
- try {
- pageTransaction.setRecordID(generateID());
- messageJournal.appendAddRecordTransactional(txID, pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, pageTransaction);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void updatePageTransaction(final long txID,
- final PageTransactionInfo pageTransaction,
- final int depages) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecordTransactional(txID, pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, new PageUpdateTXEncoding(pageTransaction.getTransactionID(), depages));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void updatePageTransaction(final PageTransactionInfo pageTransaction, final int depages) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecord(pageTransaction.getRecordID(), JournalRecordIds.PAGE_TRANSACTION, new PageUpdateTXEncoding(pageTransaction.getTransactionID(), depages), syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeReferenceTransactional(final long txID, final long queueID, final long messageID) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecordTransactional(txID, messageID, JournalRecordIds.ADD_REF, new RefEncoding(queueID));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeAcknowledgeTransactional(final long txID,
- final long queueID,
- final long messageID) throws Exception {
- readLock();
- try {
- messageJournal.appendUpdateRecordTransactional(txID, messageID, JournalRecordIds.ACKNOWLEDGE_REF, new RefEncoding(queueID));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeCursorAcknowledgeTransactional(long txID, long queueID, PagePosition position) throws Exception {
- readLock();
- try {
- long ackID = idGenerator.generateID();
- position.setRecordID(ackID);
- messageJournal.appendAddRecordTransactional(txID, ackID, JournalRecordIds.ACKNOWLEDGE_CURSOR, new CursorAckRecordEncoding(queueID, position));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storePageCompleteTransactional(long txID, long queueID, PagePosition position) throws Exception {
- long recordID = idGenerator.generateID();
- position.setRecordID(recordID);
- messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COMPLETE, new CursorAckRecordEncoding(queueID, position));
- }
-
- @Override
- public void deletePageComplete(long ackID) throws Exception {
- messageJournal.appendDeleteRecord(ackID, false);
- }
-
- @Override
- public void deleteCursorAcknowledgeTransactional(long txID, long ackID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecordTransactional(txID, ackID);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteCursorAcknowledge(long ackID) throws Exception {
- messageJournal.appendDeleteRecord(ackID, false);
- }
-
- @Override
- public long storeHeuristicCompletion(final Xid xid, final boolean isCommit) throws Exception {
- readLock();
- try {
- long id = generateID();
-
- messageJournal.appendAddRecord(id, JournalRecordIds.HEURISTIC_COMPLETION, new HeuristicCompletionEncoding(xid, isCommit), true, getContext(true));
- return id;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteHeuristicCompletion(final long id) throws Exception {
- readLock();
- try {
-
- messageJournal.appendDeleteRecord(id, true, getContext(true));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deletePageTransactional(final long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecord(recordID, false);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void updateScheduledDeliveryTimeTransactional(final long txID, final MessageReference ref) throws Exception {
- ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding(ref.getScheduledDeliveryTime(), ref.getQueue().getID());
- readLock();
- try {
-
- messageJournal.appendUpdateRecordTransactional(txID, ref.getMessage().getMessageID(), JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME, encoding);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void prepare(final long txID, final Xid xid) throws Exception {
- readLock();
- try {
- messageJournal.appendPrepareRecord(txID, new XidEncoding(xid), syncTransactional, getContext(syncTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void commit(final long txID) throws Exception {
- commit(txID, true);
- }
-
- @Override
- public void commitBindings(final long txID) throws Exception {
- bindingsJournal.appendCommitRecord(txID, true);
- }
-
- @Override
- public void rollbackBindings(final long txID) throws Exception {
- // no need to sync, it's going away anyways
- bindingsJournal.appendRollbackRecord(txID, false);
- }
-
- @Override
- public void commit(final long txID, final boolean lineUpContext) throws Exception {
- readLock();
- try {
- messageJournal.appendCommitRecord(txID, syncTransactional, getContext(syncTransactional), lineUpContext);
- if (!lineUpContext && !syncTransactional) {
- /**
- * If {@code lineUpContext == false}, it means that we have previously lined up a
- * context somewhere else (specifically see @{link TransactionImpl#asyncAppendCommit}),
- * hence we need to mark it as done even if {@code syncTransactional = false} as in this
- * case {@code getContext(syncTransactional=false)} would pass a dummy context to the
- * {@code messageJournal.appendCommitRecord(...)} call above.
- */
- getContext(true).done();
- }
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void rollback(final long txID) throws Exception {
- readLock();
- try {
- messageJournal.appendRollbackRecord(txID, syncTransactional, getContext(syncTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeDuplicateIDTransactional(final long txID,
- final SimpleString address,
- final byte[] duplID,
- final long recordID) throws Exception {
- DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);
-
- readLock();
- try {
- messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.DUPLICATE_ID, encoding);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void updateDuplicateIDTransactional(final long txID,
- final SimpleString address,
- final byte[] duplID,
- final long recordID) throws Exception {
- DuplicateIDEncoding encoding = new DuplicateIDEncoding(address, duplID);
-
- readLock();
- try {
- messageJournal.appendUpdateRecordTransactional(txID, recordID, JournalRecordIds.DUPLICATE_ID, encoding);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteDuplicateIDTransactional(final long txID, final long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecordTransactional(txID, recordID);
- }
- finally {
- readUnLock();
- }
- }
-
- // Other operations
-
- @Override
- public void updateDeliveryCount(final MessageReference ref) throws Exception {
- // no need to store if it's the same value
- // otherwise the journal will get OME in case of lots of redeliveries
- if (ref.getDeliveryCount() == ref.getPersistedCount()) {
- return;
- }
-
- ref.setPersistedCount(ref.getDeliveryCount());
- DeliveryCountUpdateEncoding updateInfo = new DeliveryCountUpdateEncoding(ref.getQueue().getID(), ref.getDeliveryCount());
-
- readLock();
- try {
- messageJournal.appendUpdateRecord(ref.getMessage().getMessageID(), JournalRecordIds.UPDATE_DELIVERY_COUNT, updateInfo, syncNonTransactional, getContext(syncNonTransactional));
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void storeAddressSetting(PersistedAddressSetting addressSetting) throws Exception {
- deleteAddressSetting(addressSetting.getAddressMatch());
- readLock();
- try {
- long id = idGenerator.generateID();
- addressSetting.setStoreId(id);
- bindingsJournal.appendAddRecord(id, JournalRecordIds.ADDRESS_SETTING_RECORD, addressSetting, true);
- mapPersistedAddressSettings.put(addressSetting.getAddressMatch(), addressSetting);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public List recoverAddressSettings() throws Exception {
- ArrayList list = new ArrayList<>(mapPersistedAddressSettings.values());
- return list;
- }
-
- @Override
- public List recoverPersistedRoles() throws Exception {
- ArrayList list = new ArrayList<>(mapPersistedRoles.values());
- return list;
- }
-
- @Override
- public void storeSecurityRoles(PersistedRoles persistedRoles) throws Exception {
-
- deleteSecurityRoles(persistedRoles.getAddressMatch());
- readLock();
- try {
- final long id = idGenerator.generateID();
- persistedRoles.setStoreId(id);
- bindingsJournal.appendAddRecord(id, JournalRecordIds.SECURITY_RECORD, persistedRoles, true);
- mapPersistedRoles.put(persistedRoles.getAddressMatch(), persistedRoles);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public final void storeID(final long journalID, final long id) throws Exception {
- readLock();
- try {
- bindingsJournal.appendAddRecord(journalID, JournalRecordIds.ID_COUNTER_RECORD, BatchingIDGenerator.createIDEncodingSupport(id), true);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteID(long journalD) throws Exception {
- readLock();
- try {
- bindingsJournal.appendDeleteRecord(journalD, false);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteAddressSetting(SimpleString addressMatch) throws Exception {
- PersistedAddressSetting oldSetting = mapPersistedAddressSettings.remove(addressMatch);
- if (oldSetting != null) {
- readLock();
- try {
- bindingsJournal.appendDeleteRecord(oldSetting.getStoreId(), false);
- }
- finally {
- readUnLock();
- }
- }
- }
-
- @Override
- public void deleteSecurityRoles(SimpleString addressMatch) throws Exception {
- PersistedRoles oldRoles = mapPersistedRoles.remove(addressMatch);
- if (oldRoles != null) {
- readLock();
- try {
- bindingsJournal.appendDeleteRecord(oldRoles.getStoreId(), false);
- }
- finally {
- readUnLock();
- }
- }
- }
-
- @Override
- public JournalLoadInformation loadMessageJournal(final PostOffice postOffice,
- final PagingManager pagingManager,
- final ResourceManager resourceManager,
- Map queueInfos,
- final Map>> duplicateIDMap,
- final Set> pendingLargeMessages,
- List pendingNonTXPageCounter,
- final JournalLoader journalLoader) throws Exception {
- List records = new ArrayList<>();
-
- List preparedTransactions = new ArrayList<>();
-
- Map messages = new HashMap<>();
- readLock();
- try {
-
- JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(messages));
-
- ArrayList largeMessages = new ArrayList<>();
-
- Map> queueMap = new HashMap<>();
-
- Map pageSubscriptions = new HashMap<>();
-
- final int totalSize = records.size();
-
- for (int reccount = 0; reccount < totalSize; reccount++) {
- // It will show log.info only with large journals (more than 1 million records)
- if (reccount > 0 && reccount % 1000000 == 0) {
- long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
-
- ActiveMQServerLogger.LOGGER.percentLoaded(percent);
- }
-
- RecordInfo record = records.get(reccount);
- byte[] data = record.data;
-
- ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
-
- byte recordType = record.getUserRecordType();
-
- switch (recordType) {
- case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING: {
- PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
-
- pending.decode(buff);
-
- if (pendingLargeMessages != null) {
- // it could be null on tests, and we don't need anything on that case
- pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
- }
- break;
- }
- case JournalRecordIds.ADD_LARGE_MESSAGE: {
- LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
-
- messages.put(record.id, largeMessage);
-
- largeMessages.add(largeMessage);
-
- break;
- }
- case JournalRecordIds.ADD_MESSAGE: {
- ServerMessage message = new ServerMessageImpl(record.id, 50);
-
- message.decode(buff);
-
- messages.put(record.id, message);
-
- break;
- }
- case JournalRecordIds.ADD_REF: {
- long messageID = record.id;
-
- RefEncoding encoding = new RefEncoding();
-
- encoding.decode(buff);
-
- Map queueMessages = queueMap.get(encoding.queueID);
-
- if (queueMessages == null) {
- queueMessages = new LinkedHashMap<>();
-
- queueMap.put(encoding.queueID, queueMessages);
- }
-
- ServerMessage message = messages.get(messageID);
-
- if (message == null) {
- ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
- }
- else {
- queueMessages.put(messageID, new AddMessageRecord(message));
- }
-
- break;
- }
- case JournalRecordIds.ACKNOWLEDGE_REF: {
- long messageID = record.id;
-
- RefEncoding encoding = new RefEncoding();
-
- encoding.decode(buff);
-
- Map queueMessages = queueMap.get(encoding.queueID);
-
- if (queueMessages == null) {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
- }
- else {
- AddMessageRecord rec = queueMessages.remove(messageID);
-
- if (rec == null) {
- ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
- }
- }
-
- break;
- }
- case JournalRecordIds.UPDATE_DELIVERY_COUNT: {
- long messageID = record.id;
-
- DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
-
- encoding.decode(buff);
-
- Map queueMessages = queueMap.get(encoding.queueID);
-
- if (queueMessages == null) {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
- }
- else {
- AddMessageRecord rec = queueMessages.get(messageID);
-
- if (rec == null) {
- ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
- }
- else {
- rec.deliveryCount = encoding.count;
- }
- }
-
- break;
- }
- case JournalRecordIds.PAGE_TRANSACTION: {
- if (record.isUpdate) {
- PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
-
- pageUpdate.decode(buff);
-
- PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
-
- pageTX.onUpdate(pageUpdate.recods, null, null);
- }
- else {
- PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
-
- pageTransactionInfo.decode(buff);
-
- pageTransactionInfo.setRecordID(record.id);
-
- pagingManager.addTransaction(pageTransactionInfo);
- }
-
- break;
- }
- case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME: {
- long messageID = record.id;
-
- ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
-
- encoding.decode(buff);
-
- Map queueMessages = queueMap.get(encoding.queueID);
-
- if (queueMessages == null) {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
- }
- else {
-
- AddMessageRecord rec = queueMessages.get(messageID);
-
- if (rec == null) {
- ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
- }
- else {
- rec.scheduledDeliveryTime = encoding.scheduledDeliveryTime;
- }
- }
-
- break;
- }
- case JournalRecordIds.DUPLICATE_ID: {
- DuplicateIDEncoding encoding = new DuplicateIDEncoding();
-
- encoding.decode(buff);
-
- List> ids = duplicateIDMap.get(encoding.address);
-
- if (ids == null) {
- ids = new ArrayList<>();
-
- duplicateIDMap.put(encoding.address, ids);
- }
-
- ids.add(new Pair<>(encoding.duplID, record.id));
-
- break;
- }
- case JournalRecordIds.HEURISTIC_COMPLETION: {
- HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
- encoding.decode(buff);
- resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
- break;
- }
- case JournalRecordIds.ACKNOWLEDGE_CURSOR: {
- CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
- encoding.decode(buff);
-
- encoding.position.setRecordID(record.id);
-
- PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.reloadACK(encoding.position);
- }
- else {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
- messageJournal.appendDeleteRecord(record.id, false);
-
- }
-
- break;
- }
- case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE: {
- PageCountRecord encoding = new PageCountRecord();
-
- encoding.decode(buff);
-
- PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.getCounter().loadValue(record.id, encoding.getValue());
- }
- else {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
- messageJournal.appendDeleteRecord(record.id, false);
- }
-
- break;
- }
-
- case JournalRecordIds.PAGE_CURSOR_COUNTER_INC: {
- PageCountRecordInc encoding = new PageCountRecordInc();
-
- encoding.decode(buff);
-
- PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.getCounter().loadInc(record.id, encoding.getValue());
- }
- else {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
- messageJournal.appendDeleteRecord(record.id, false);
- }
-
- break;
- }
-
- case JournalRecordIds.PAGE_CURSOR_COMPLETE: {
- CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
- encoding.decode(buff);
-
- encoding.position.setRecordID(record.id);
-
- PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.reloadPageCompletion(encoding.position);
- }
- else {
- ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
- messageJournal.appendDeleteRecord(record.id, false);
- }
-
- break;
- }
-
- case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER: {
-
- PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
- pendingCountEncoding.decode(buff);
- pendingCountEncoding.setID(record.id);
-
- // This can be null on testcases not interested on this outcome
- if (pendingNonTXPageCounter != null) {
- pendingNonTXPageCounter.add(pendingCountEncoding);
- }
- break;
- }
-
- default: {
- throw new IllegalStateException("Invalid record type " + recordType);
- }
- }
-
- // This will free up memory sooner. The record is not needed any more
- // and its byte array would consume memory during the load process even though it's not necessary any longer
- // what would delay processing time during load
- records.set(reccount, null);
- }
-
- // Release the memory as soon as not needed any longer
- records.clear();
- records = null;
-
- journalLoader.handleAddMessage(queueMap);
-
- loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
-
- for (PageSubscription sub : pageSubscriptions.values()) {
- sub.getCounter().processReload();
- }
-
- for (LargeServerMessage msg : largeMessages) {
- if (msg.getRefCount() == 0) {
- ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
- msg.decrementDelayDeletionCount();
- }
- }
-
- journalLoader.handleNoMessageReferences(messages);
-
- // To recover positions on Iterators
- if (pagingManager != null) {
- // it could be null on certain tests that are not dealing with paging
- // This could also be the case in certain embedded conditions
- pagingManager.processReload();
- }
-
- if (perfBlastPages != -1) {
- messageJournal.perfBlast(perfBlastPages);
- }
-
- journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
- journalLoaded = true;
- return info;
- }
- finally {
- readUnLock();
- }
- }
-
- /**
- * @param queueID
- * @param pageSubscriptions
- * @param queueInfos
- * @return
- */
- private static PageSubscription locateSubscription(final long queueID,
- final Map pageSubscriptions,
- final Map queueInfos,
- final PagingManager pagingManager) throws Exception {
-
- PageSubscription subs = pageSubscriptions.get(queueID);
- if (subs == null) {
- QueueBindingInfo queueInfo = queueInfos.get(queueID);
-
- if (queueInfo != null) {
- SimpleString address = queueInfo.getAddress();
- PagingStore store = pagingManager.getPageStore(address);
- subs = store.getCursorProvider().getSubscription(queueID);
- pageSubscriptions.put(queueID, subs);
- }
- }
-
- return subs;
- }
-
- // grouping handler operations
- @Override
- public void addGrouping(final GroupBinding groupBinding) throws Exception {
- GroupingEncoding groupingEncoding = new GroupingEncoding(groupBinding.getId(), groupBinding.getGroupId(), groupBinding.getClusterName());
- readLock();
- try {
- bindingsJournal.appendAddRecord(groupBinding.getId(), JournalRecordIds.GROUP_RECORD, groupingEncoding, true);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteGrouping(long tx, final GroupBinding groupBinding) throws Exception {
- readLock();
- try {
- bindingsJournal.appendDeleteRecordTransactional(tx, groupBinding.getId());
- }
- finally {
- readUnLock();
- }
- }
-
- // BindingsImpl operations
-
- @Override
- public void addQueueBinding(final long tx, final Binding binding) throws Exception {
- Queue queue = (Queue) binding.getBindable();
-
- Filter filter = queue.getFilter();
-
- SimpleString filterString = filter == null ? null : filter.getFilterString();
-
- PersistentQueueBindingEncoding bindingEncoding = new PersistentQueueBindingEncoding(queue.getName(), binding.getAddress(), filterString, queue.getUser(), queue.isAutoCreated());
-
- readLock();
- try {
- bindingsJournal.appendAddRecordTransactional(tx, binding.getID(), JournalRecordIds.QUEUE_BINDING_RECORD, bindingEncoding);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteQueueBinding(long tx, final long queueBindingID) throws Exception {
- readLock();
- try {
- bindingsJournal.appendDeleteRecordTransactional(tx, queueBindingID);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public long storePageCounterInc(long txID, long queueID, int value) throws Exception {
- readLock();
- try {
- long recordID = idGenerator.generateID();
- messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_INC, new PageCountRecordInc(queueID, value));
- return recordID;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public long storePageCounterInc(long queueID, int value) throws Exception {
- readLock();
- try {
- final long recordID = idGenerator.generateID();
- messageJournal.appendAddRecord(recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_INC, new PageCountRecordInc(queueID, value), true, getContext());
- return recordID;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public long storePageCounter(long txID, long queueID, long value) throws Exception {
- readLock();
- try {
- final long recordID = idGenerator.generateID();
- messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE, new PageCountRecord(queueID, value));
- return recordID;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public long storePendingCounter(final long queueID, final long pageID, final int inc) throws Exception {
- readLock();
- try {
- final long recordID = idGenerator.generateID();
- PageCountPendingImpl pendingInc = new PageCountPendingImpl(queueID, pageID, inc);
- // We must guarantee the record sync before we actually write on the page otherwise we may get out of sync
- // on the counter
- messageJournal.appendAddRecord(recordID, JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER, pendingInc, true);
- return recordID;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deleteIncrementRecord(long txID, long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecordTransactional(txID, recordID);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deletePageCounter(long txID, long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecordTransactional(txID, recordID);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void deletePendingPageCounter(long txID, long recordID) throws Exception {
- readLock();
- try {
- messageJournal.appendDeleteRecordTransactional(txID, recordID);
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public JournalLoadInformation loadBindingJournal(final List queueBindingInfos,
- final List groupingInfos) throws Exception {
- List records = new ArrayList<>();
-
- List preparedTransactions = new ArrayList<>();
-
- JournalLoadInformation bindingsInfo = bindingsJournal.load(records, preparedTransactions, null);
-
- for (RecordInfo record : records) {
- long id = record.id;
-
- ActiveMQBuffer buffer = ActiveMQBuffers.wrappedBuffer(record.data);
-
- byte rec = record.getUserRecordType();
-
- if (rec == JournalRecordIds.QUEUE_BINDING_RECORD) {
- PersistentQueueBindingEncoding bindingEncoding = newBindingEncoding(id, buffer);
-
- queueBindingInfos.add(bindingEncoding);
- }
- else if (rec == JournalRecordIds.ID_COUNTER_RECORD) {
- idGenerator.loadState(record.id, buffer);
- }
- else if (rec == JournalRecordIds.GROUP_RECORD) {
- GroupingEncoding encoding = newGroupEncoding(id, buffer);
- groupingInfos.add(encoding);
- }
- else if (rec == JournalRecordIds.ADDRESS_SETTING_RECORD) {
- PersistedAddressSetting setting = newAddressEncoding(id, buffer);
- mapPersistedAddressSettings.put(setting.getAddressMatch(), setting);
- }
- else if (rec == JournalRecordIds.SECURITY_RECORD) {
- PersistedRoles roles = newSecurityRecord(id, buffer);
- mapPersistedRoles.put(roles.getAddressMatch(), roles);
- }
- else {
- throw new IllegalStateException("Invalid record type " + rec);
- }
- }
-
- // This will instruct the IDGenerator to cleanup old records
- idGenerator.cleanup();
-
- return bindingsInfo;
- }
-
- @Override
- public void lineUpContext() {
- readLock();
- try {
- messageJournal.lineUpContext(getContext());
- }
- finally {
- readUnLock();
- }
- }
-
- // ActiveMQComponent implementation
- // ------------------------------------------------------
-
- @Override
- public synchronized void start() throws Exception {
- if (started) {
- return;
- }
-
- checkAndCreateDir(config.getBindingsLocation(), config.isCreateBindingsDir());
-
- checkAndCreateDir(config.getJournalLocation(), config.isCreateJournalDir());
-
- checkAndCreateDir(config.getLargeMessagesLocation(), config.isCreateJournalDir());
-
- cleanupIncompleteFiles();
-
- singleThreadExecutor = Executors.newSingleThreadExecutor(AccessController.doPrivileged(new PrivilegedAction() {
- @Override
- public ActiveMQThreadFactory run() {
- return new ActiveMQThreadFactory("ActiveMQ-IO-SingleThread", true, JournalStorageManager.class.getClassLoader());
- }
- }));
-
- bindingsJournal.start();
-
- messageJournal.start();
-
- started = true;
- }
-
- @Override
- public void stop() throws Exception {
- stop(false);
- }
-
- @Override
- public synchronized void persistIdGenerator() {
- if (journalLoaded && idGenerator != null) {
- // Must call close to make sure last id is persisted
- idGenerator.persistCurrentID();
- }
- }
-
- @Override
- public synchronized void stop(boolean ioCriticalError) throws Exception {
- if (!started) {
- return;
- }
-
- if (!ioCriticalError) {
- performCachedLargeMessageDeletes();
- // Must call close to make sure last id is persisted
- if (journalLoaded && idGenerator != null)
- idGenerator.persistCurrentID();
- }
-
- final CountDownLatch latch = new CountDownLatch(1);
- executor.execute(new Runnable() {
- @Override
- public void run() {
- latch.countDown();
- }
- });
-
- latch.await(30, TimeUnit.SECONDS);
-
- // We cache the variable as the replicator could be changed between here and the time we call stop
- // since sendLiveIsStoping my issue a close back from the channel
- // and we want to ensure a stop here just in case
- ReplicationManager replicatorInUse = replicator;
- if (replicatorInUse != null) {
- final OperationContext token = replicator.sendLiveIsStopping(LiveStopping.FAIL_OVER);
- if (token != null) {
- try {
- token.waitCompletion(5000);
- }
- catch (Exception e) {
- // ignore it
- }
- }
- replicatorInUse.stop();
- }
- bindingsJournal.stop();
-
- messageJournal.stop();
-
- singleThreadExecutor.shutdown();
-
- journalLoaded = false;
-
- started = false;
- }
-
- @Override
- public synchronized boolean isStarted() {
- return started;
- }
-
- /**
- * TODO: Is this still being used ?
- */
- public JournalLoadInformation[] loadInternalOnly() throws Exception {
- readLock();
- try {
- JournalLoadInformation[] info = new JournalLoadInformation[2];
- info[0] = bindingsJournal.loadInternalOnly();
- info[1] = messageJournal.loadInternalOnly();
-
- return info;
- }
- finally {
- readUnLock();
- }
- }
-
- @Override
- public void beforePageRead() throws Exception {
- if (pageMaxConcurrentIO != null) {
- pageMaxConcurrentIO.acquire();
- }
- }
-
- @Override
- public void afterPageRead() throws Exception {
- if (pageMaxConcurrentIO != null) {
- pageMaxConcurrentIO.release();
- }
- }
-
- @Override
- public ByteBuffer allocateDirectBuffer(int size) {
- return journalFF.allocateDirectBuffer(size);
- }
-
- @Override
- public void freeDirectBuffer(ByteBuffer buffer) {
- journalFF.releaseBuffer(buffer);
- }
-
- // Public -----------------------------------------------------------------------------------
-
- @Override
- public Journal getMessageJournal() {
- return messageJournal;
- }
-
- @Override
- public Journal getBindingsJournal() {
- return bindingsJournal;
- }
-
- // Package protected ---------------------------------------------
-
- protected void confirmLargeMessage(final LargeServerMessage largeServerMessage) {
- if (largeServerMessage.getPendingRecordID() >= 0) {
- try {
- confirmPendingLargeMessage(largeServerMessage.getPendingRecordID());
- largeServerMessage.setPendingRecordID(-1);
- }
- catch (Exception e) {
- ActiveMQServerLogger.LOGGER.warn(e.getMessage(), e);
- }
- }
- }
-
- // This should be accessed from this package only
- void deleteLargeMessageFile(final LargeServerMessage largeServerMessage) throws ActiveMQException {
- if (largeServerMessage.getPendingRecordID() < 0) {
- try {
- // The delete file happens asynchronously
- // And the client won't be waiting for the actual file to be deleted.
- // We set a temporary record (short lived) on the journal
- // to avoid a situation where the server is restarted and pending large message stays on forever
- largeServerMessage.setPendingRecordID(storePendingLargeMessage(largeServerMessage.getMessageID()));
- }
- catch (Exception e) {
- throw new ActiveMQInternalErrorException(e.getMessage(), e);
- }
- }
- final SequentialFile file = largeServerMessage.getFile();
- if (file == null) {
- return;
- }
-
- if (largeServerMessage.isDurable() && isReplicated()) {
- readLock();
- try {
- if (isReplicated() && replicator.isSynchronizing()) {
- synchronized (largeMessagesToDelete) {
- largeMessagesToDelete.add(Long.valueOf(largeServerMessage.getMessageID()));
- confirmLargeMessage(largeServerMessage);
- }
- return;
- }
- }
- finally {
- readUnLock();
- }
- }
- Runnable deleteAction = new Runnable() {
- @Override
- public void run() {
- try {
- readLock();
- try {
- if (replicator != null) {
- replicator.largeMessageDelete(largeServerMessage.getMessageID());
- }
- file.delete();
-
- // The confirm could only be done after the actual delete is done
- confirmLargeMessage(largeServerMessage);
- }
- finally {
- readUnLock();
- }
- }
- catch (Exception e) {
- ActiveMQServerLogger.LOGGER.journalErrorDeletingMessage(e, largeServerMessage.getMessageID());
- }
- }
-
- };
-
- if (executor == null) {
- deleteAction.run();
- }
- else {
- executor.execute(deleteAction);
- }
- }
-
- SequentialFile createFileForLargeMessage(final long messageID, final boolean durable) {
- if (durable) {
- return createFileForLargeMessage(messageID, LargeMessageExtension.DURABLE);
- }
- else {
- return createFileForLargeMessage(messageID, LargeMessageExtension.TEMPORARY);
- }
- }
-
- @Override
- public SequentialFile createFileForLargeMessage(final long messageID, LargeMessageExtension extension) {
- return largeMessagesFactory.createSequentialFile(messageID + extension.getExtension());
- }
-
- // Private ----------------------------------------------------------------------------------
-
- private void checkAndCreateDir(final File dir, final boolean create) {
- if (!dir.exists()) {
- if (create) {
- if (!dir.mkdirs()) {
- throw new IllegalStateException("Failed to create directory " + dir);
- }
- }
- else {
- throw ActiveMQMessageBundle.BUNDLE.cannotCreateDir(dir.getAbsolutePath());
- }
- }
- }
-
- /**
- * @param messages
- * @param buff
- * @return
- * @throws Exception
- */
- protected LargeServerMessage parseLargeMessage(final Map messages,
- final ActiveMQBuffer buff) throws Exception {
- LargeServerMessage largeMessage = createLargeMessage();
-
- LargeMessageEncoding messageEncoding = new LargeMessageEncoding(largeMessage);
-
- messageEncoding.decode(buff);
-
- if (largeMessage.containsProperty(Message.HDR_ORIG_MESSAGE_ID)) {
- // for compatibility: couple with old behaviour, copying the old file to avoid message loss
- long originalMessageID = largeMessage.getLongProperty(Message.HDR_ORIG_MESSAGE_ID);
-
- SequentialFile currentFile = createFileForLargeMessage(largeMessage.getMessageID(), true);
-
- if (!currentFile.exists()) {
- SequentialFile linkedFile = createFileForLargeMessage(originalMessageID, true);
- if (linkedFile.exists()) {
- linkedFile.copyTo(currentFile);
- linkedFile.close();
- }
- }
-
- currentFile.close();
- }
-
- return largeMessage;
- }
-
- private void loadPreparedTransactions(final PostOffice postOffice,
- final PagingManager pagingManager,
- final ResourceManager resourceManager,
- final Map queueInfos,
- final List preparedTransactions,
- final Map>> duplicateIDMap,
- final Map pageSubscriptions,
- final Set> pendingLargeMessages,
- JournalLoader journalLoader) throws Exception {
- // recover prepared transactions
- for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
- XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());
-
- Xid xid = encodingXid.xid;
-
- Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);
-
- List referencesToAck = new ArrayList<>();
-
- Map messages = new HashMap<>();
-
- // Use same method as load message journal to prune out acks, so they don't get added.
- // Then have reacknowledge(tx) methods on queue, which needs to add the page size
-
- // first get any sent messages for this tx and recreate
- for (RecordInfo record : preparedTransaction.getRecords()) {
- byte[] data = record.data;
-
- ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
-
- byte recordType = record.getUserRecordType();
-
- switch (recordType) {
- case JournalRecordIds.ADD_LARGE_MESSAGE: {
- messages.put(record.id, parseLargeMessage(messages, buff));
-
- break;
- }
- case JournalRecordIds.ADD_MESSAGE: {
- ServerMessage message = new ServerMessageImpl(record.id, 50);
-
- message.decode(buff);
-
- messages.put(record.id, message);
-
- break;
- }
- case JournalRecordIds.ADD_REF: {
- long messageID = record.id;
-
- RefEncoding encoding = new RefEncoding();
-
- encoding.decode(buff);
-
- ServerMessage message = messages.get(messageID);
-
- if (message == null) {
- throw new IllegalStateException("Cannot find message with id " + messageID);
- }
-
- journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);
-
- break;
- }
- case JournalRecordIds.ACKNOWLEDGE_REF: {
- long messageID = record.id;
-
- RefEncoding encoding = new RefEncoding();
-
- encoding.decode(buff);
-
- journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);
-
- break;
- }
- case JournalRecordIds.PAGE_TRANSACTION: {
-
- PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
-
- pageTransactionInfo.decode(buff);
-
- if (record.isUpdate) {
- PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
- pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
- }
- else {
- pageTransactionInfo.setCommitted(false);
-
- tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
-
- pagingManager.addTransaction(pageTransactionInfo);
-
- tx.addOperation(new FinishPageMessageOperation());
- }
-
- break;
- }
- case SET_SCHEDULED_DELIVERY_TIME: {
- // Do nothing - for prepared txs, the set scheduled delivery time will only occur in a send in which
- // case the message will already have the header for the scheduled delivery time, so no need to do
- // anything.
-
- break;
- }
- case DUPLICATE_ID: {
- // We need load the duplicate ids at prepare time too
- DuplicateIDEncoding encoding = new DuplicateIDEncoding();
-
- encoding.decode(buff);
-
- DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
-
- cache.load(tx, encoding.duplID);
-
- break;
- }
- case ACKNOWLEDGE_CURSOR: {
- CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
- encoding.decode(buff);
-
- encoding.position.setRecordID(record.id);
-
- PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.reloadPreparedACK(tx, encoding.position);
- referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
- }
- else {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
- }
- break;
- }
- case PAGE_CURSOR_COUNTER_VALUE: {
- ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();
-
- break;
- }
-
- case PAGE_CURSOR_COUNTER_INC: {
- PageCountRecordInc encoding = new PageCountRecordInc();
-
- encoding.decode(buff);
-
- PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
-
- if (sub != null) {
- sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue());
- sub.notEmpty();
- }
- else {
- ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
- }
-
- break;
- }
-
- default: {
- ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
- }
- }
- }
-
- for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
- byte[] data = recordDeleted.data;
-
- if (data.length > 0) {
- ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
- byte b = buff.readByte();
-
- switch (b) {
- case ADD_LARGE_MESSAGE_PENDING: {
- long messageID = buff.readLong();
- if (!pendingLargeMessages.remove(new Pair<>(recordDeleted.id, messageID))) {
- ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
- }
- installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
- break;
- }
- default:
- ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
- }
- }
-
- }
-
- journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
- }
- }
-
- private void cleanupIncompleteFiles() throws Exception {
- if (largeMessagesFactory != null) {
- List tmpFiles = largeMessagesFactory.listFiles("tmp");
- for (String tmpFile : tmpFiles) {
- SequentialFile file = largeMessagesFactory.createSequentialFile(tmpFile);
- file.delete();
- }
- }
- }
-
- private OperationContext getContext(final boolean sync) {
- if (sync) {
- return getContext();
- }
- else {
- return DummyOperationContext.getInstance();
- }
- }
-
- // Inner Classes
- // ----------------------------------------------------------------------------
-
- private static final class DummyOperationContext implements OperationContext {
-
- private static DummyOperationContext instance = new DummyOperationContext();
-
- public static OperationContext getInstance() {
- return DummyOperationContext.instance;
- }
-
- @Override
- public void executeOnCompletion(final IOCallback runnable) {
- // There are no executeOnCompletion calls while using the DummyOperationContext
- // However we keep the code here for correctness
- runnable.done();
- }
-
- @Override
- public void replicationDone() {
- }
-
- @Override
- public void replicationLineUp() {
- }
-
- @Override
- public void storeLineUp() {
- }
-
- @Override
- public void done() {
- }
-
- @Override
- public void onError(final int errorCode, final String errorMessage) {
- }
-
- @Override
- public void waitCompletion() {
- }
-
- @Override
- public boolean waitCompletion(final long timeout) {
- return true;
- }
-
- @Override
- public void pageSyncLineUp() {
- }
-
- @Override
- public void pageSyncDone() {
- }
- }
-
- /*
- * @param id
- * @param buffer
- * @return
- */
- protected static PersistedRoles newSecurityRecord(long id, ActiveMQBuffer buffer) {
- PersistedRoles roles = new PersistedRoles();
- roles.decode(buffer);
- roles.setStoreId(id);
- return roles;
- }
-
- /**
- * @param id
- * @param buffer
- * @return
- */
- static PersistedAddressSetting newAddressEncoding(long id, ActiveMQBuffer buffer) {
- PersistedAddressSetting setting = new PersistedAddressSetting();
- setting.decode(buffer);
- setting.setStoreId(id);
- return setting;
- }
-
- /**
- * @param id
- * @param buffer
- * @return
- */
- static GroupingEncoding newGroupEncoding(long id, ActiveMQBuffer buffer) {
- GroupingEncoding encoding = new GroupingEncoding();
- encoding.decode(buffer);
- encoding.setId(id);
- return encoding;
- }
-
- /**
- * @param id
- * @param buffer
- * @return
- */
- protected static PersistentQueueBindingEncoding newBindingEncoding(long id, ActiveMQBuffer buffer) {
- PersistentQueueBindingEncoding bindingEncoding = new PersistentQueueBindingEncoding();
-
- bindingEncoding.decode(buffer);
-
- bindingEncoding.setId(id);
- return bindingEncoding;
- }
-
- @Override
- public boolean addToPage(PagingStore store,
- ServerMessage msg,
- Transaction tx,
- RouteContextList listCtx) throws Exception {
- /**
- * Exposing the read-lock here is an encapsulation violation done in order to keep the code
- * simpler. The alternative would be to add a second method, say 'verifyPaging', to
- * PagingStore.
- *
- * Adding this second method would also be more surprise prone as it would require a certain
- * calling order.
- *
- * The reasoning is that exposing the lock is more explicit and therefore `less bad`.
- */
- return store.page(msg, tx, listCtx, storageManagerLock.readLock());
- }
-
- private void installLargeMessageConfirmationOnTX(Transaction tx, long recordID) {
- TXLargeMessageConfirmationOperation txoper = (TXLargeMessageConfirmationOperation) tx.getProperty(TransactionPropertyIndexes.LARGE_MESSAGE_CONFIRMATIONS);
- if (txoper == null) {
- txoper = new TXLargeMessageConfirmationOperation();
- tx.putProperty(TransactionPropertyIndexes.LARGE_MESSAGE_CONFIRMATIONS, txoper);
- }
- txoper.confirmedMessages.add(recordID);
- }
-}
+}
\ No newline at end of file
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationStartSyncMessage.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationStartSyncMessage.java
index 56c946109c..d31db8b1c2 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationStartSyncMessage.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationStartSyncMessage.java
@@ -22,7 +22,7 @@ import java.util.List;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.core.journal.impl.JournalFile;
-import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager.JournalContent;
+import org.apache.activemq.artemis.core.persistence.impl.journal.AbstractJournalStorageManager;
import org.apache.activemq.artemis.core.protocol.core.impl.PacketImpl;
/**
@@ -40,8 +40,8 @@ public class ReplicationStartSyncMessage extends PacketImpl {
private boolean allowsAutoFailBack;
public enum SyncDataType {
- JournalBindings(JournalContent.BINDINGS.typeByte),
- JournalMessages(JournalContent.MESSAGES.typeByte),
+ JournalBindings(AbstractJournalStorageManager.JournalContent.BINDINGS.typeByte),
+ JournalMessages(AbstractJournalStorageManager.JournalContent.MESSAGES.typeByte),
LargeMessages((byte) 2);
private byte code;
@@ -50,8 +50,8 @@ public class ReplicationStartSyncMessage extends PacketImpl {
this.code = code;
}
- public static JournalContent getJournalContentType(SyncDataType dataType) {
- return JournalContent.getType(dataType.code);
+ public static AbstractJournalStorageManager.JournalContent getJournalContentType(SyncDataType dataType) {
+ return AbstractJournalStorageManager.JournalContent.getType(dataType.code);
}
public static SyncDataType getDataType(byte code) {
@@ -86,7 +86,7 @@ public class ReplicationStartSyncMessage extends PacketImpl {
}
public ReplicationStartSyncMessage(JournalFile[] datafiles,
- JournalContent contentType,
+ AbstractJournalStorageManager.JournalContent contentType,
String nodeID,
boolean allowsAutoFailBack) {
this();
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationSyncFileMessage.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationSyncFileMessage.java
index 6c3007ce77..ed5901f964 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationSyncFileMessage.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/protocol/core/impl/wireformat/ReplicationSyncFileMessage.java
@@ -23,7 +23,7 @@ import java.util.Set;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.api.core.SimpleString;
-import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager.JournalContent;
+import org.apache.activemq.artemis.core.persistence.impl.journal.AbstractJournalStorageManager;
import org.apache.activemq.artemis.core.protocol.core.impl.PacketImpl;
/**
@@ -35,7 +35,7 @@ public final class ReplicationSyncFileMessage extends PacketImpl {
/**
* The JournalType or {@code null} if sync'ing large-messages.
*/
- private JournalContent journalType;
+ private AbstractJournalStorageManager.JournalContent journalType;
/**
* This value refers to {@link org.apache.activemq.artemis.core.journal.impl.JournalFile#getFileID()}, or the
* message id if we are sync'ing a large-message.
@@ -74,7 +74,7 @@ public final class ReplicationSyncFileMessage extends PacketImpl {
super(REPLICATION_SYNC_FILE);
}
- public ReplicationSyncFileMessage(JournalContent content,
+ public ReplicationSyncFileMessage(AbstractJournalStorageManager.JournalContent content,
SimpleString storeName,
long id,
int size,
@@ -135,7 +135,7 @@ public final class ReplicationSyncFileMessage extends PacketImpl {
fileId = buffer.readLong();
switch (FileType.getFileType(buffer.readByte())) {
case JOURNAL: {
- journalType = JournalContent.getType(buffer.readByte());
+ journalType = AbstractJournalStorageManager.JournalContent.getType(buffer.readByte());
fileType = FileType.JOURNAL;
break;
}
@@ -160,7 +160,7 @@ public final class ReplicationSyncFileMessage extends PacketImpl {
return fileId;
}
- public JournalContent getJournalContent() {
+ public AbstractJournalStorageManager.JournalContent getJournalContent() {
return journalType;
}
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationEndpoint.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationEndpoint.java
index 63e393520f..5b6dbfb4c4 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationEndpoint.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationEndpoint.java
@@ -47,7 +47,7 @@ import org.apache.activemq.artemis.core.paging.impl.Page;
import org.apache.activemq.artemis.core.paging.impl.PagingManagerImpl;
import org.apache.activemq.artemis.core.paging.impl.PagingStoreFactoryNIO;
import org.apache.activemq.artemis.core.persistence.StorageManager;
-import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager.JournalContent;
+import org.apache.activemq.artemis.core.persistence.impl.journal.AbstractJournalStorageManager.JournalContent;
import org.apache.activemq.artemis.core.persistence.impl.journal.LargeServerMessageInSync;
import org.apache.activemq.artemis.core.protocol.core.Channel;
import org.apache.activemq.artemis.core.protocol.core.ChannelHandler;
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationManager.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationManager.java
index 255415bdb4..a8984fc89f 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationManager.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/replication/ReplicationManager.java
@@ -32,12 +32,12 @@ import org.apache.activemq.artemis.api.core.ActiveMQExceptionType;
import org.apache.activemq.artemis.api.core.Pair;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.api.core.client.SessionFailureListener;
-import org.apache.activemq.artemis.core.journal.EncodingSupport;
import org.apache.activemq.artemis.core.io.SequentialFile;
+import org.apache.activemq.artemis.core.journal.EncodingSupport;
import org.apache.activemq.artemis.core.journal.impl.JournalFile;
import org.apache.activemq.artemis.core.paging.PagedMessage;
import org.apache.activemq.artemis.core.persistence.OperationContext;
-import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager.JournalContent;
+import org.apache.activemq.artemis.core.persistence.impl.journal.AbstractJournalStorageManager;
import org.apache.activemq.artemis.core.persistence.impl.journal.OperationContextImpl;
import org.apache.activemq.artemis.core.protocol.core.Channel;
import org.apache.activemq.artemis.core.protocol.core.ChannelHandler;
@@ -437,7 +437,7 @@ public final class ReplicationManager implements ActiveMQComponent {
* @throws ActiveMQException
* @throws Exception
*/
- public void syncJournalFile(JournalFile jf, JournalContent content) throws Exception {
+ public void syncJournalFile(JournalFile jf, AbstractJournalStorageManager.JournalContent content) throws Exception {
if (!enabled) {
return;
}
@@ -473,7 +473,7 @@ public final class ReplicationManager implements ActiveMQComponent {
* @param maxBytesToSend maximum number of bytes to read and send from the file
* @throws Exception
*/
- private void sendLargeFile(JournalContent content,
+ private void sendLargeFile(AbstractJournalStorageManager.JournalContent content,
SimpleString pageStore,
final long id,
SequentialFile file,
@@ -536,7 +536,7 @@ public final class ReplicationManager implements ActiveMQComponent {
* @throws ActiveMQException
*/
public void sendStartSyncMessage(JournalFile[] datafiles,
- JournalContent contentType,
+ AbstractJournalStorageManager.JournalContent contentType,
String nodeID,
boolean allowsAutoFailBack) throws ActiveMQException {
if (enabled)
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/security/SecurityAuth.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/security/SecurityAuth.java
index e7d4bb2e79..0cdb77d0ef 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/security/SecurityAuth.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/security/SecurityAuth.java
@@ -6,7 +6,7 @@
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/impl/ActiveMQServerImpl.java b/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/impl/ActiveMQServerImpl.java
index ae93a97a2f..58993fc218 100644
--- a/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/impl/ActiveMQServerImpl.java
+++ b/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/impl/ActiveMQServerImpl.java
@@ -51,6 +51,7 @@ import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.config.ConfigurationUtils;
import org.apache.activemq.artemis.core.config.CoreQueueConfiguration;
import org.apache.activemq.artemis.core.config.DivertConfiguration;
+import org.apache.activemq.artemis.core.config.StoreConfiguration;
import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;
import org.apache.activemq.artemis.core.filter.Filter;
import org.apache.activemq.artemis.core.filter.impl.FilterImpl;
@@ -70,6 +71,7 @@ import org.apache.activemq.artemis.core.persistence.StorageManager;
import org.apache.activemq.artemis.core.persistence.config.PersistedAddressSetting;
import org.apache.activemq.artemis.core.persistence.config.PersistedRoles;
import org.apache.activemq.artemis.core.persistence.impl.PageCountPending;
+import org.apache.activemq.artemis.core.persistence.impl.journal.JDBCJournalStorageManager;
import org.apache.activemq.artemis.core.persistence.impl.journal.JournalStorageManager;
import org.apache.activemq.artemis.core.persistence.impl.journal.OperationContextImpl;
import org.apache.activemq.artemis.core.persistence.impl.nullpm.NullStorageManager;
@@ -1479,7 +1481,13 @@ public class ActiveMQServerImpl implements ActiveMQServer {
*/
private StorageManager createStorageManager() {
if (configuration.isPersistenceEnabled()) {
- return new JournalStorageManager(configuration, executorFactory, shutdownOnCriticalIO);
+ if (configuration.getStoreConfiguration() != null && configuration.getStoreConfiguration().getStoreType() == StoreConfiguration.StoreType.DATABASE) {
+ return new JDBCJournalStorageManager(configuration, executorFactory, shutdownOnCriticalIO);
+ }
+ // Default to File Based Storage Manager, (Legacy default configuration).
+ else {
+ return new JournalStorageManager(configuration, executorFactory, shutdownOnCriticalIO);
+ }
}
return new NullStorageManager();
}
diff --git a/artemis-server/src/main/resources/schema/artemis-configuration.xsd b/artemis-server/src/main/resources/schema/artemis-configuration.xsd
index 59b0491051..91740194d6 100644
--- a/artemis-server/src/main/resources/schema/artemis-configuration.xsd
+++ b/artemis-server/src/main/resources/schema/artemis-configuration.xsd
@@ -683,6 +683,14 @@
+
+
+
+ The Store Type used by the server
+
+
+
+
@@ -1437,6 +1445,55 @@
+
+
+
+
+
+
+ Use a file based store for peristing journal, paging and large messages
+
+
+
+
+
+
+ Use a database for persisting journal, paging and large messages
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The JDBC Connection URL e.g. jdbc:mysql://localhost:3306/
+
+
+
+
+
+
+ The table name used to store message journal entries
+
+
+
+
+
+
+ The table name used to store bindings journal entries
+
+
+
+
+
+
@@ -1489,6 +1546,7 @@
+
diff --git a/artemis-server/src/test/java/org/apache/activemq/artemis/core/config/impl/DatabaseStoreConfigurationTest.java b/artemis-server/src/test/java/org/apache/activemq/artemis/core/config/impl/DatabaseStoreConfigurationTest.java
new file mode 100644
index 0000000000..4934cbd592
--- /dev/null
+++ b/artemis-server/src/test/java/org/apache/activemq/artemis/core/config/impl/DatabaseStoreConfigurationTest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.activemq.artemis.core.config.impl;
+
+import org.apache.activemq.artemis.core.config.Configuration;
+import org.apache.activemq.artemis.core.config.FileDeploymentManager;
+import org.apache.activemq.artemis.core.config.StoreConfiguration;
+import org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl;
+import org.apache.activemq.artemis.tests.util.ActiveMQTestBase;
+import org.junit.Test;
+
+public class DatabaseStoreConfigurationTest extends ActiveMQTestBase {
+
+ @Test
+ public void databaseStoreConfigTest() throws Exception {
+ Configuration configuration = createConfiguration("database-store-config.xml");
+ ActiveMQServerImpl server = new ActiveMQServerImpl(configuration);
+ assertEquals(StoreConfiguration.StoreType.DATABASE, server.getConfiguration().getStoreConfiguration().getStoreType());
+ }
+
+ protected Configuration createConfiguration(String fileName) throws Exception {
+ FileConfiguration fc = new FileConfiguration();
+ FileDeploymentManager deploymentManager = new FileDeploymentManager(fileName);
+ deploymentManager.addDeployable(fc);
+
+ deploymentManager.readConfiguration();
+
+ // we need this otherwise the data folder will be located under activemq-server and not on the temporary directory
+ fc.setPagingDirectory(getTestDir() + "/" + fc.getPagingDirectory());
+ fc.setLargeMessagesDirectory(getTestDir() + "/" + fc.getLargeMessagesDirectory());
+ fc.setJournalDirectory(getTestDir() + "/" + fc.getJournalDirectory());
+ fc.setBindingsDirectory(getTestDir() + "/" + fc.getBindingsDirectory());
+
+ return fc;
+ }
+}
diff --git a/artemis-server/src/test/java/org/apache/activemq/artemis/tests/util/ActiveMQTestBase.java b/artemis-server/src/test/java/org/apache/activemq/artemis/tests/util/ActiveMQTestBase.java
index bd2a1565e4..5d76df8634 100644
--- a/artemis-server/src/test/java/org/apache/activemq/artemis/tests/util/ActiveMQTestBase.java
+++ b/artemis-server/src/test/java/org/apache/activemq/artemis/tests/util/ActiveMQTestBase.java
@@ -80,6 +80,7 @@ import org.apache.activemq.artemis.core.config.ClusterConnectionConfiguration;
import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;
import org.apache.activemq.artemis.core.config.impl.SecurityConfiguration;
+import org.apache.activemq.artemis.core.config.storage.DatabaseStorageConfiguration;
import org.apache.activemq.artemis.core.io.SequentialFileFactory;
import org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
@@ -387,6 +388,19 @@ public abstract class ActiveMQTestBase extends Assert {
return createDefaultConfig(0, netty);
}
+ /**
+  * Builds a default configuration (netty acceptor enabled) whose persistence is
+  * backed by a JDBC store instead of the file journal. The bindings and message
+  * tables are fixed to BINDINGS and MESSAGES; the connection URL is an embedded
+  * Derby database under the per-test directory (see getTestJDBCConnectionUrl()).
+  *
+  * @return a configuration carrying a DatabaseStorageConfiguration
+  * @throws Exception propagated from createDefaultConfig
+  */
+ protected Configuration createDefaultJDBCConfig() throws Exception {
+ Configuration configuration = createDefaultConfig(true);
+
+ DatabaseStorageConfiguration dbStorageConfiguration = new DatabaseStorageConfiguration();
+ dbStorageConfiguration.setJdbcConnectionUrl(getTestJDBCConnectionUrl());
+ dbStorageConfiguration.setBindingsTableName("BINDINGS");
+ dbStorageConfiguration.setMessageTableName("MESSAGES");
+
+ configuration.setStoreConfiguration(dbStorageConfiguration);
+
+ return configuration;
+ }
+
protected Configuration createDefaultConfig(final int serverID, final boolean netty) throws Exception {
ConfigurationImpl configuration = createBasicConfig(serverID).setJMXManagementEnabled(false).addAcceptorConfiguration(new TransportConfiguration(INVM_ACCEPTOR_FACTORY, generateInVMParams(serverID)));
@@ -749,6 +763,10 @@ public abstract class ActiveMQTestBase extends Assert {
return testDir;
}
+ /**
+  * @return an embedded Derby JDBC URL rooted in the per-test directory;
+  *         "create=true" tells Derby to create the database on first connection
+  */
+ protected final String getTestJDBCConnectionUrl() {
+ return "jdbc:derby:" + getTestDir() + File.separator + "derby;create=true";
+ }
+
protected final File getTestDirfile() {
return new File(testDir);
}
diff --git a/artemis-server/src/test/resources/database-store-config.xml b/artemis-server/src/test/resources/database-store-config.xml
new file mode 100644
index 0000000000..e387c2cc24
--- /dev/null
+++ b/artemis-server/src/test/resources/database-store-config.xml
@@ -0,0 +1,30 @@
+<!-- NOTE(review): the XML tags of this resource were lost in extraction; reconstructed
+     from the element names the parser reads (database-store, jdbc-connection-url,
+     bindings-table-name, message-table-name) — confirm against the original patch. -->
+<configuration xmlns="urn:activemq">
+   <core xmlns="urn:activemq:core">
+      <store>
+         <database-store>
+            <jdbc-connection-url>jdbc:derby:target/derby/database-store;create=true</jdbc-connection-url>
+            <bindings-table-name>BINDINGS_TABLE</bindings-table-name>
+            <message-table-name>MESSAGE_TABLE</message-table-name>
+         </database-store>
+      </store>
+   </core>
+</configuration>
diff --git a/docs/user-manual/en/persistence.md b/docs/user-manual/en/persistence.md
index ccf14ec1bd..e5bcf81526 100644
--- a/docs/user-manual/en/persistence.md
+++ b/docs/user-manual/en/persistence.md
@@ -3,12 +3,14 @@
In this chapter we will describe how persistence works with Apache ActiveMQ Artemis and
how to configure it.
-Apache ActiveMQ Artemis ships with a high performance journal. Since Apache ActiveMQ Artemis handles
-its own persistence, rather than relying on a database or other 3rd
-party persistence engine it is very highly optimised for the specific
-messaging use cases.
+Apache ActiveMQ Artemis ships with two persistence options: the Apache ActiveMQ Artemis File Journal,
+which is highly optimized for the messaging use case and gives great performance, and the Apache ActiveMQ Artemis
+JDBC Store, which uses JDBC to connect to a database of your choice. The JDBC Store is still under development,
+but it is possible to use its journal features (essentially everything except for paging and large messages).
-An Apache ActiveMQ Artemis journal is an *append only* journal. It consists of a set of
+## Apache ActiveMQ Artemis File Journal (Default)
+
+An Apache ActiveMQ Artemis file journal is an *append only* journal. It consists of a set of
files on disk. Each file is pre-created to a fixed size and initially
filled with padding. As operations are performed on the server, e.g. add
message, update message, delete message, records are appended to the
@@ -126,7 +128,7 @@ If no persistence is required at all, Apache ActiveMQ Artemis can also be config
not to persist any data at all to storage as discussed in the Configuring
the broker for Zero Persistence section.
-## Configuring the bindings journal
+### Configuring the bindings journal
The bindings journal is configured using the following attributes in
`broker.xml`
@@ -143,11 +145,11 @@ The bindings journal is configured using the following attributes in
`bindings-directory` if it does not already exist. The default value
is `true`
-## Configuring the jms journal
+### Configuring the jms journal
The jms config shares its configuration with the bindings journal.
-## Configuring the message journal
+### Configuring the message journal
The message journal is configured using the following attributes in
`broker.xml`
@@ -297,7 +299,7 @@ The message journal is configured using the following attributes in
The default for this parameter is `30`
-## An important note on disabling disk write cache.
+### An important note on disabling disk write cache.
> **Warning**
>
@@ -336,7 +338,7 @@ The message journal is configured using the following attributes in
> On Windows you can check / change the setting by right clicking on the
> disk and clicking properties.
-## Installing AIO
+### Installing AIO
The Java NIO journal gives great performance, but If you are running
Apache ActiveMQ Artemis using Linux Kernel 2.6 or later, we highly recommend you use
@@ -356,6 +358,40 @@ Using aptitude, (e.g. on Ubuntu or Debian system):
apt-get install libaio
+## Apache ActiveMQ Artemis JDBC Persistence
+
+The Apache ActiveMQ Artemis JDBC persistence store is still under development and only supports persistence of standard messages and bindings (this is everything except large messages and paging). The JDBC store uses a JDBC connection to store messages and bindings data in records in database tables. The data stored in the database tables is encoded using Apache ActiveMQ Artemis journal encoding.
+
+### Configuring JDBC Persistence
+
+To configure Apache ActiveMQ Artemis to use a database for persisting messages and bindings data you must do two things.
+
+1. Add the appropriate JDBC client libraries to the Artemis runtime. You can do this by dropping the relevant jars in the lib folder of the ActiveMQ Artemis distribution.
+
+2. Create a `store` element in your `broker.xml` config file under the `core` element. For example:
+
+```xml
+<store>
+   <database-store>
+      <jdbc-connection-url>jdbc:derby:target/derby/database-store;create=true</jdbc-connection-url>
+      <bindings-table-name>BINDINGS_TABLE</bindings-table-name>
+      <message-table-name>MESSAGE_TABLE</message-table-name>
+   </database-store>
+</store>
+```
+
+- `jdbc-connection-url`
+
+ The full JDBC connection URL for your database server. The connection url should include all configuration parameters and database name.
+
+- `bindings-table-name`
+
+ The name of the table in which bindings data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share a single database amongst multiple servers, without interference.
+
+- `message-table-name`
+
+ The name of the table in which messages and related data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share a single database amongst multiple servers, without interference.
+
## Configuring Apache ActiveMQ Artemis for Zero Persistence
In some situations, zero persistence is sometimes required for a
@@ -366,3 +402,5 @@ straightforward. Simply set the parameter `persistence-enabled` in
Please note that if you set this parameter to false, then *zero*
persistence will occur. That means no bindings data, message data, large
message data, duplicate id caches or paging data will be persisted.
+
+
diff --git a/pom.xml b/pom.xml
index fcbf91b63a..adfb650122 100644
--- a/pom.xml
+++ b/pom.xml
@@ -47,6 +47,7 @@
artemis-ra
artemis-rest
artemis-service-extensions
+ artemis-jdbc-store
artemis-maven-plugin
artemis-server-osgi
integration/activemq-spring-integration
@@ -82,6 +83,7 @@
3.0.13.Final
0.10
1.10
+ 10.11.1.1
true
true
true
@@ -202,6 +204,11 @@
${fuse.mqtt.client.version}