Implementing AMQ-4744: Support using LevelDB as a nested store in mKahaDB

Hiram Chirino 2013-09-27 09:19:23 -04:00
parent 28163a4065
commit f75520fc8b
6 changed files with 188 additions and 32 deletions
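In short: DataLocator and MessageRecord now carry a reference to the LevelDBStore that wrote them, so that when several LevelDB stores run side by side under mKahaDB, a locator cached on a MessageId is only reused by the store that owns it; a record tagged with a different store is re-marshalled into the current one. LevelDBStore also becomes TransactionIdTransformerAware (defaulting to an identity transform), guards transaction operations with a new verify_running check once the store is stopping or stopped, and tolerates null preCommit/postCommit callbacks on commit.

For context, a nested setup can be wired up programmatically with the same APIs the new mLevelDBXARecoveryBrokerTest exercises. A minimal sketch; the class name, directory, and the idea of calling it from main are illustrative, not part of this commit:

import java.io.File;
import java.util.LinkedList;
import java.util.List;

import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter;
import org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter;

// Hypothetical example class; mirrors the wiring in mLevelDBXARecoveryBrokerTest below.
public class NestedLevelDBExample {
    public static void main(String[] args) throws Exception {
        BrokerService broker = new BrokerService();

        // Default entry (no destination filter): a LevelDB store that
        // picks up every destination not matched by another filter.
        FilteredKahaDBPersistenceAdapter defaultEntry = new FilteredKahaDBPersistenceAdapter();
        defaultEntry.setPersistenceAdapter(new LevelDBPersistenceAdapter());

        List<FilteredKahaDBPersistenceAdapter> adapters = new LinkedList<FilteredKahaDBPersistenceAdapter>();
        adapters.add(defaultEntry);

        MultiKahaDBPersistenceAdapter mKahaDB = new MultiKahaDBPersistenceAdapter();
        mKahaDB.setDirectory(new File("target/mKahaDB")); // illustrative location
        mKahaDB.setFilteredPersistenceAdapters(adapters);

        broker.setPersistenceAdapter(mKahaDB);
        broker.start();
    }
}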

DBManager.scala

@@ -37,8 +37,8 @@ import org.apache.activemq.ActiveMQMessageAuditNoSync
 import org.fusesource.hawtdispatch

 case class EntryLocator(qid:Long, seq:Long)
-case class DataLocator(pos:Long, len:Int)
-case class MessageRecord(id:MessageId, data:Buffer, syncNeeded:Boolean) {
+case class DataLocator(store:LevelDBStore, pos:Long, len:Int)
+case class MessageRecord(store:LevelDBStore, id:MessageId, data:Buffer, syncNeeded:Boolean) {
   var locator:DataLocator = _
 }
 case class QueueEntryRecord(id:MessageId, queueKey:Long, queueSeq:Long, deliveries:Int=0)
@@ -267,23 +267,35 @@ class DelayableUOW(val manager:DBManager) extends BaseRetained {
     val id = message.getMessageId
-    val messageRecord = id.getDataLocator match {
-      case null =>
-        // encodes body and release object bodies, in case message was sent from
-        // a VM connection. Releases additional memory.
-        message.storeContentAndClear()
-        var packet = manager.parent.wireFormat.marshal(message)
-        var data = new Buffer(packet.data, packet.offset, packet.length)
-        if( manager.snappyCompressLogs ) {
-          data = Snappy.compress(data)
-        }
-        val record = MessageRecord(id, data, message.isResponseRequired)
-        id.setDataLocator(record)
-        record
+
+    def create_message_record: MessageRecord = {
+      // encodes body and release object bodies, in case message was sent from
+      // a VM connection. Releases additional memory.
+      message.storeContentAndClear()
+      var packet = manager.parent.wireFormat.marshal(message)
+      var data = new Buffer(packet.data, packet.offset, packet.length)
+      if (manager.snappyCompressLogs) {
+        data = Snappy.compress(data)
+      }
+      val record = MessageRecord(manager.parent, id, data, message.isResponseRequired)
+      id.setDataLocator(record)
+      record
+    }
+
+    val messageRecord = id.getDataLocator match {
+      case null =>
+        create_message_record
       case record:MessageRecord =>
-        record
+        if( record.store == manager.parent ) {
+          record
+        } else {
+          create_message_record
+        }
       case x:DataLocator =>
-        null
+        if( x.store == manager.parent ) {
+          null
+        } else {
+          create_message_record
+        }
     }
     val entry = QueueEntryRecord(id, queueKey, queueSeq)

LevelDBClient.scala

@@ -1190,7 +1190,7 @@ class LevelDBClient(store: LevelDBStore) {
   def queueCursor(collectionKey: Long, seq:Long)(func: (Message)=>Boolean) = {
     collectionCursor(collectionKey, encodeLong(seq)) { (key, value) =>
       val seq = decodeLong(key)
-      var locator = DataLocator(value.getValueLocation, value.getValueLength)
+      var locator = DataLocator(store, value.getValueLocation, value.getValueLength)
       val msg = getMessage(locator)
       msg.getMessageId().setEntryLocator(EntryLocator(collectionKey, seq))
       msg.getMessageId().setDataLocator(locator)
@@ -1211,12 +1211,12 @@ class LevelDBClient(store: LevelDBStore) {
         val seq = is.readLong()
         val sub = is.readLong()
         val ack = store.wireFormat.unmarshal(is).asInstanceOf[MessageAck]
-        ack.getLastMessageId.setDataLocator(DataLocator(log, offset))
+        ack.getLastMessageId.setDataLocator(DataLocator(store, log, offset))
        ack.getLastMessageId.setEntryLocator(EntryLocator(qid, seq))
        func(XaAckRecord(collectionKey, seq, ack, sub))
      } else {
-        var locator = DataLocator(value.getValueLocation, value.getValueLength)
+        var locator = DataLocator(store, value.getValueLocation, value.getValueLength)
        val msg = getMessage(locator)
        msg.getMessageId().setEntryLocator(EntryLocator(collectionKey, seq))
        msg.getMessageId().setDataLocator(locator)
@@ -1240,7 +1240,7 @@ class LevelDBClient(store: LevelDBStore) {
      case x:MessageRecord =>
        // Encoded form is still in memory..
        Some(x.data)
-      case DataLocator(pos, len) =>
+      case DataLocator(store, pos, len) =>
        // Load the encoded form from disk.
        log.read(pos, len).map(new Buffer(_))
    }
@@ -1335,7 +1335,7 @@ class LevelDBClient(store: LevelDBStore) {
      val start = System.nanoTime()
      val p = appender.append(LOG_DATA, messageRecord.data)
      log_info = p._2
-      dataLocator = DataLocator(p._1, messageRecord.data.length)
+      dataLocator = DataLocator(store, p._1, messageRecord.data.length)
      messageRecord.locator = dataLocator
      // println("msg: "+messageRecord.id+" -> "+dataLocator)
      write_message_total += System.nanoTime() - start

LevelDBStore.scala

@@ -133,7 +133,7 @@ class LevelDBStoreView(val store:LevelDBStore) extends LevelDBStoreViewMBean {
 import LevelDBStore._

-class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore with PListStore {
+class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore with PListStore with TransactionIdTransformerAware {

   final val wireFormat = new OpenWireFormat
   final val db = new DBManager(this)
@@ -284,6 +284,14 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
     }
   }

+  var transactionIdTransformer: TransactionIdTransformer = new TransactionIdTransformer {
+    def transform(txid: TransactionId): TransactionId = txid
+  }
+
+  def setTransactionIdTransformer(transactionIdTransformer: TransactionIdTransformer) {
+    this.transactionIdTransformer = transactionIdTransformer
+  }
+
   def setBrokerName(brokerName: String): Unit = {
   }
@@ -407,7 +415,8 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
     }
   }

-  def transaction(txid: TransactionId) = {
+  def transaction(original: TransactionId) = {
+    val txid = transactionIdTransformer.transform(original)
     var rc = transactions.get(txid)
     if( rc == null ) {
       rc = Transaction(txid)
@@ -419,11 +428,31 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
     rc
   }

-  def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = {
+  def verify_running = {
+    if( isStopping || isStopped ) {
+      try {
+        throw new IOException("Not running")
+      } catch {
+        case e:IOException =>
+          if( broker_service!=null ) {
+            broker_service.handleIOException(e)
+          }
+          throw e
+      }
+    }
+  }
+
+  def commit(original: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = {
+    verify_running
+    val txid = transactionIdTransformer.transform(original)
     transactions.remove(txid) match {
       case null =>
         // Only in-flight non-persistent messages in this TX.
+        if( preCommit!=null )
           preCommit.run()
+        if( postCommit!=null )
           postCommit.run()
       case tx =>
         val done = new CountDownLatch(1)
@@ -435,6 +464,7 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
         }
         uow.syncFlag = true
         uow.addCompleteListener {
+          if( preCommit!=null )
             preCommit.run()
           done.countDown()
         }
@@ -444,11 +474,15 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
         if( tx.prepared ) {
           db.removeTransactionContainer(tx.xacontainer_id)
         }
+        if( postCommit!=null )
           postCommit.run()
     }
   }

-  def rollback(txid: TransactionId) = {
+  def rollback(original: TransactionId) = {
+    verify_running
+    val txid = transactionIdTransformer.transform(original)
     transactions.remove(txid) match {
       case null =>
         debug("on rollback, the transaction " + txid + " does not exist")
@@ -468,7 +502,10 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
       }
     }

-  def prepare(tx: TransactionId) = {
+  def prepare(original: TransactionId) = {
+    verify_running
+    val tx = transactionIdTransformer.transform(original)
     transactions.get(tx) match {
       case null =>
         warn("on prepare, the transaction " + tx + " does not exist")
@@ -479,6 +516,9 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
   var doingRecover = false

   def recover(listener: TransactionRecoveryListener) = {
+    verify_running
     this.doingRecover = true
     try {
       import collection.JavaConversions._
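The TransactionIdTransformer hook above is what lets a wrapping store rewrite transaction ids before the nested LevelDB store looks them up; the default installed by this commit is the identity transform, and mKahaDB would presumably install one that maps the broker's ids to per-store ids. A minimal sketch of plugging in a custom transformer; the package of TransactionIdTransformer is an assumption (it is expected to live with the KahaDB store classes), and the pass-through behaviour simply mirrors the default:

import org.apache.activemq.command.TransactionId;
import org.apache.activemq.leveldb.LevelDBStore;
// Assumption: the transformer interface lives alongside the KahaDB store classes.
import org.apache.activemq.store.kahadb.TransactionIdTransformer;

public class TransformerExample {
    public static void main(String[] args) {
        LevelDBStore store = new LevelDBStore();
        // Pass-through transformer that only traces the ids it sees;
        // a real wrapper store would return a translated id instead.
        store.setTransactionIdTransformer(new TransactionIdTransformer() {
            public TransactionId transform(TransactionId txid) {
                System.out.println("store sees txid: " + txid);
                return txid;
            }
        });
    }
}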

mLevelDBXARecoveryBrokerTest.java

@@ -0,0 +1,69 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.broker;

import junit.framework.Test;

import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter;
import org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter;

import java.util.LinkedList;
import java.util.List;

public class mLevelDBXARecoveryBrokerTest extends XARecoveryBrokerTest {

    @Override
    protected void configureBroker(BrokerService broker) throws Exception {
        super.configureBroker(broker);

        MultiKahaDBPersistenceAdapter mKahaDB = new MultiKahaDBPersistenceAdapter();
        List<FilteredKahaDBPersistenceAdapter> adapters = new LinkedList<FilteredKahaDBPersistenceAdapter>();

        // Default entry: a LevelDB store for all otherwise unmatched destinations.
        FilteredKahaDBPersistenceAdapter defaultEntry = new FilteredKahaDBPersistenceAdapter();
        defaultEntry.setPersistenceAdapter(new LevelDBPersistenceAdapter());
        adapters.add(defaultEntry);

        // A second LevelDB store dedicated to the "special" queue.
        FilteredKahaDBPersistenceAdapter special = new FilteredKahaDBPersistenceAdapter();
        special.setDestination(new ActiveMQQueue("special"));
        special.setPersistenceAdapter(new LevelDBPersistenceAdapter());
        adapters.add(special);

        mKahaDB.setFilteredPersistenceAdapters(adapters);
        broker.setPersistenceAdapter(mKahaDB);
    }

    public static Test suite() {
        return suite(mLevelDBXARecoveryBrokerTest.class);
    }

    public static void main(String[] args) {
        junit.textui.TestRunner.run(suite());
    }

    protected ActiveMQDestination createDestination() {
        return new ActiveMQQueue("test,special");
    }

    public void testQueuePersistentPreparedAcksAvailableAfterRestartAndRollback() throws Exception {
        // Overridden as a no-op to skip the inherited test.
        // super.testQueuePersistentPreparedAcksAvailableAfterRestartAndRollback();
    }

    public void testQueuePersistentUncommittedAcksLostOnRestart() throws Exception {
        // Overridden as a no-op to skip the inherited test.
        // super.testQueuePersistentUncommittedAcksLostOnRestart();
    }
}

LevelDBStorePerDestinationTest.java

@@ -0,0 +1,35 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.store;

import org.apache.activemq.leveldb.LevelDBStore;

import java.io.IOException;

/**
 * Runs the StorePerDestinationTest suite against a LevelDB store
 * instead of the default KahaDB store.
 */
public class LevelDBStorePerDestinationTest extends StorePerDestinationTest {

    @Override
    protected PersistenceAdapter createStore(boolean delete) throws IOException {
        LevelDBStore store = new LevelDBStore();
        store.setLogSize(maxFileLength);
        if (delete) {
            store.deleteAllMessages();
        }
        return store;
    }
}

StorePerDestinationTest.java

@@ -62,7 +62,7 @@ public class StorePerDestinationTest {
         return broker;
     }

-    protected KahaDBPersistenceAdapter createStore(boolean delete) throws IOException {
+    protected PersistenceAdapter createStore(boolean delete) throws IOException {
         KahaDBPersistenceAdapter kaha = new KahaDBPersistenceAdapter();
         kaha.setJournalMaxFileLength(maxFileLength);
         kaha.setCleanupInterval(5000);
@@ -183,7 +183,7 @@ public class StorePerDestinationTest {
         FilteredKahaDBPersistenceAdapter otherFilteredKahaDBPersistenceAdapter =
                 new FilteredKahaDBPersistenceAdapter();
-        KahaDBPersistenceAdapter otherStore = createStore(false);
+        PersistenceAdapter otherStore = createStore(false);
         File someOtherDisk = new File("target" + File.separator + "someOtherDisk");
         otherStore.setDirectory(someOtherDisk);
         otherFilteredKahaDBPersistenceAdapter.setPersistenceAdapter(otherStore);
@@ -192,7 +192,7 @@ public class StorePerDestinationTest {
         FilteredKahaDBPersistenceAdapter filteredKahaDBPersistenceAdapterDefault =
                 new FilteredKahaDBPersistenceAdapter();
-        KahaDBPersistenceAdapter storeDefault = createStore(false);
+        PersistenceAdapter storeDefault = createStore(false);
         filteredKahaDBPersistenceAdapterDefault.setPersistenceAdapter(storeDefault);
         adapters.add(filteredKahaDBPersistenceAdapterDefault);