- *
- * - * @author Hiram Chirino - */ -public interface LevelDBStoreTestMBean { - - @MBeanInfo("Used to set if the log force calls should be suspended") - void setSuspendForce(boolean value); - - @MBeanInfo("Gets if the log force calls should be suspended") - boolean getSuspendForce(); - - @MBeanInfo("Gets the number of threads waiting to do a log force call.") - long getForceCalls(); - - @MBeanInfo("Used to set if the log write calls should be suspended") - void setSuspendWrite(boolean value); - - @MBeanInfo("Gets if the log write calls should be suspended") - boolean getSuspendWrite(); - - @MBeanInfo("Gets the number of threads waiting to do a log write call.") - long getWriteCalls(); - - @MBeanInfo("Used to set if the log delete calls should be suspended") - void setSuspendDelete(boolean value); - - @MBeanInfo("Gets if the log delete calls should be suspended") - boolean getSuspendDelete(); - - @MBeanInfo("Gets the number of threads waiting to do a log delete call.") - long getDeleteCalls(); -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java deleted file mode 100644 index 68507950ae..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb; - -import org.apache.activemq.broker.jmx.MBeanInfo; - -import java.io.File; - -/** - *- *
- * - * @author Hiram Chirino - */ -public interface LevelDBStoreViewMBean { - - @MBeanInfo("The directory holding the store index data.") - String getIndexDirectory(); - - @MBeanInfo("The directory holding the store log data.") - String getLogDirectory(); - - @MBeanInfo("The size the log files are allowed to grow to.") - long getLogSize(); - - @MBeanInfo("The implementation of the LevelDB index being used.") - String getIndexFactory(); - - @MBeanInfo("Are writes synced to disk.") - boolean getSync(); - - @MBeanInfo("Is data verified against checksums as it's loaded back from disk.") - boolean getVerifyChecksums(); - - @MBeanInfo("The maximum number of open files the index will open at one time.") - int getIndexMaxOpenFiles(); - - @MBeanInfo("Number of keys between restart points for delta encoding of keys in the index") - int getIndexBlockRestartInterval(); - - @MBeanInfo("Do aggressive checking of store data") - boolean getParanoidChecks(); - - @MBeanInfo("Amount of data to build up in memory for the index before converting to a sorted on-disk file.") - int getIndexWriteBufferSize(); - - @MBeanInfo("Approximate size of user data packed per block for the index") - int getIndexBlockSize(); - - @MBeanInfo("The type of compression to use for the index") - String getIndexCompression(); - - @MBeanInfo("The size of the cache index") - long getIndexCacheSize(); - - @MBeanInfo("The maximum amount of async writes to buffer up") - int getAsyncBufferSize(); - - @MBeanInfo("The number of units of work which have been closed.") - long getUowClosedCounter(); - @MBeanInfo("The number of units of work which have been canceled.") - long getUowCanceledCounter(); - @MBeanInfo("The number of units of work which started getting stored.") - long getUowStoringCounter(); - @MBeanInfo("The number of units of work which completed getting stored") - long getUowStoredCounter(); - - @MBeanInfo("Gets and resets the maximum time (in ms) a unit of work took to complete.") - double resetUowMaxCompleteLatency(); - @MBeanInfo("Gets and resets the maximum time (in ms) an index write batch took to execute.") - double resetMaxIndexWriteLatency(); - @MBeanInfo("Gets and resets the maximum time (in ms) a log write took to execute (includes the index write latency).") - double resetMaxLogWriteLatency(); - @MBeanInfo("Gets and resets the maximum time (in ms) a log flush took to execute.") - double resetMaxLogFlushLatency(); - @MBeanInfo("Gets and resets the maximum time (in ms) a log rotation took to perform.") - double resetMaxLogRotateLatency(); - - @MBeanInfo("Gets the maximum time (in ms) a unit of work took to complete.") - double getUowMaxCompleteLatency(); - @MBeanInfo("Gets the maximum time (in ms) an index write batch took to execute.") - double getMaxIndexWriteLatency(); - @MBeanInfo("Gets the maximum time (in ms) a log write took to execute (includes the index write latency).") - double getMaxLogWriteLatency(); - @MBeanInfo("Gets the maximum time (in ms) a log flush took to execute.") - double getMaxLogFlushLatency(); - @MBeanInfo("Gets the maximum time (in ms) a log rotation took to perform.") - double getMaxLogRotateLatency(); - - @MBeanInfo("Gets the index statistics.") - String getIndexStats(); - - @MBeanInfo("Compacts disk usage") - void compact(); - -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreViewMBean.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreViewMBean.java deleted file mode 100644 
index 9c81eb1aa9..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreViewMBean.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.replicated; - -import org.apache.activemq.broker.jmx.MBeanInfo; - -import javax.management.openmbean.CompositeData; - -/** - *- *
- * - * @author Hiram Chirino - */ -public interface ReplicatedLevelDBStoreViewMBean { - - @MBeanInfo("The address of the ZooKeeper server.") - String getZkAddress(); - @MBeanInfo("The path in ZooKeeper to hold master elections.") - String getZkPath(); - @MBeanInfo("The ZooKeeper session timeout.") - String getZkSessionTimeout(); - @MBeanInfo("The address and port the master will bind for the replication protocol.") - String getBind(); - @MBeanInfo("The number of replication nodes that will be part of the replication cluster.") - int getReplicas(); - - @MBeanInfo("The role of this node in the replication cluster.") - String getNodeRole(); - - @MBeanInfo("The replication status.") - String getStatus(); - - @MBeanInfo("The status of the connected slaves.") - CompositeData[] getSlaves(); - - @MBeanInfo("The current position of the replication log.") - Long getPosition(); - - @MBeanInfo("When the last entry was added to the replication log.") - Long getPositionDate(); - - @MBeanInfo("The directory holding the data.") - String getDirectory(); - - @MBeanInfo("The sync strategy to use.") - String getSync(); - - @MBeanInfo("The node id of this replication node.") - String getNodeId(); -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/FileInfo.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/FileInfo.java deleted file mode 100644 index 530b329145..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/FileInfo.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.replicated.dto; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.HashSet; -import java.util.Set; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="file_info") -@XmlAccessorType(XmlAccessType.FIELD) -public class FileInfo { - - @XmlAttribute(name = "file") - public String file; - - @XmlAttribute(name = "length") - public long length; - - @XmlAttribute(name = "crc32") - public long crc32; -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogDelete.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogDelete.java deleted file mode 100644 index 9500eecc52..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogDelete.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.replicated.dto; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="remove_request") -@XmlAccessorType(XmlAccessType.FIELD) -@JsonIgnoreProperties(ignoreUnknown = true) -public class LogDelete { - @XmlAttribute(name="log") - public long log; -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogWrite.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogWrite.java deleted file mode 100644 index e2a3182947..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/LogWrite.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated.dto; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="log_write") -@XmlAccessorType(XmlAccessType.FIELD) -@JsonIgnoreProperties(ignoreUnknown = true) -public class LogWrite { - - @XmlAttribute(name="file") - public long file; - - @XmlAttribute(name="offset") - public long offset; - - @XmlAttribute(name="length") - public long length; - - @XmlAttribute(name="sync") - public boolean sync=false; - - @XmlAttribute(name="date") - public long date; -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/Login.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/Login.java deleted file mode 100644 index 0d9fedc6ec..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/Login.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.replicated.dto; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="login") -@XmlAccessorType(XmlAccessType.FIELD) -@JsonIgnoreProperties(ignoreUnknown = true) -public class Login { - - @XmlAttribute(name="node_id") - public String node_id; - - @XmlAttribute(name="security_token") - public String security_token; - -} diff --git a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/SyncResponse.java b/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/SyncResponse.java deleted file mode 100644 index 9deda12965..0000000000 --- a/activemq-leveldb-store/src/main/java/org/apache/activemq/leveldb/replicated/dto/SyncResponse.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.replicated.dto; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.HashSet; -import java.util.Set; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="sync_response") -@XmlAccessorType(XmlAccessType.FIELD) -public class SyncResponse { - - @XmlAttribute(name = "snapshot_position") - public long snapshot_position; - - @XmlAttribute(name = "wal_append_position") - public long wal_append_position; - - @XmlAttribute(name = "index_files") - public Set
- * A Snappy abstraction which attempts to use the iq80 implementation and falls back
- * to the xerial Snappy implementation if it cannot be loaded. You can change the
- * load order by setting the 'leveldb.snappy' system property. Example:
- *
- * <pre>
- * -Dleveldb.snappy=xerial,iq80
- * </pre>
- *
- * The system property can also be configured with the name of a class which
- * implements the Snappy.SPI interface.
- *
- *
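For context, the fallback loading that this comment describes can be sketched roughly as below. Only the "leveldb.snappy" property name and the iq80/xerial ordering come from the comment; the trait, object, and implementation class names in the sketch are illustrative assumptions, not the store's actual API.

    // Minimal sketch, assuming hypothetical adapter classes that implement SnappyTrait.
    trait SnappyTrait {
      def compress(input: Array[Byte]): Array[Byte]
      def uncompress(input: Array[Byte]): Array[Byte]
    }

    object SnappyLoader {
      // Short aliases mapped to assumed adapter class names.
      private val aliases = Map(
        "iq80"   -> "example.Iq80SnappyAdapter",   // assumed class name
        "xerial" -> "example.XerialSnappyAdapter"  // assumed class name
      )

      // Try each candidate named by the system property, in order,
      // and return the first one that can be instantiated.
      def load(): Option[SnappyTrait] = {
        val order = System.getProperty("leveldb.snappy", "iq80,xerial")
        for (name <- order.split(",").map(_.trim)) {
          // A fully qualified class name may also be given directly.
          val className = aliases.getOrElse(name, name)
          try {
            return Some(Class.forName(className).getDeclaredConstructor()
              .newInstance().asInstanceOf[SnappyTrait])
          } catch {
            case _: Throwable => // not on the classpath or incompatible; try the next candidate
          }
        }
        None
      }
    }

A caller would simply use SnappyLoader.load() and fall back to uncompressed I/O when it returns None; passing a class name directly in the property mirrors the "name of a class which implements the Snappy.SPI interface" behaviour described above.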
- * - * @author Hiram Chirino - */ -class CountDownFuture[T <: AnyRef]() extends ListenableFuture[T] { - - private val latch:CountDownLatch=new CountDownLatch(1) - @volatile - var value:T = _ - var error:Throwable = _ - var listener:Runnable = _ - - def cancel(mayInterruptIfRunning: Boolean) = false - def isCancelled = false - - - def completed = latch.getCount()==0 - def await() = latch.await() - def await(p1: Long, p2: TimeUnit) = latch.await(p1, p2) - - def set(v:T) = { - value = v - latch.countDown() - fireListener - } - def failed(v:Throwable) = { - error = v - latch.countDown() - fireListener - } - - def get() = { - latch.await() - if( error!=null ) { - throw error; - } - value - } - - def get(p1: Long, p2: TimeUnit) = { - if(latch.await(p1, p2)) { - if( error!=null ) { - throw error; - } - value - } else { - throw new TimeoutException - } - } - - def isDone = latch.await(0, TimeUnit.SECONDS); - - def fireListener = { - if (listener != null) { - try { - listener.run() - } catch { - case e : Throwable => { - LevelDBStore.warn(e, "unexpected exception on future listener " +listener) - } - } - } - } - - def addListener(l: Runnable) = { - listener = l - if (isDone) { - fireListener - } - } -} - -object UowManagerConstants { - val QUEUE_COLLECTION_TYPE = 1 - val TOPIC_COLLECTION_TYPE = 2 - val TRANSACTION_COLLECTION_TYPE = 3 - val SUBSCRIPTION_COLLECTION_TYPE = 4 - - case class QueueEntryKey(queue:Long, seq:Long) - def key(x:QueueEntryRecord) = QueueEntryKey(x.queueKey, x.queueSeq) -} - -import UowManagerConstants._ - -class DelayableUOW(val manager:DBManager) extends BaseRetained { - val countDownFuture = new CountDownFuture[AnyRef]() - var canceled = false; - - val uowId:Int = manager.lastUowId.incrementAndGet() - var actions = Map[MessageId, MessageAction]() - var subAcks = ListBuffer[SubAckRecord]() - var completed = false - var disableDelay = false - var delayableActions = 0 - - private var _state:UowState = UowOpen - - def state = this._state - def state_=(next:UowState) { - assert(this._state.stage < next.stage) - this._state = next - } - - var syncFlag = false - def syncNeeded = syncFlag || actions.find( _._2.syncNeeded ).isDefined - def size = 100+actions.foldLeft(0L){ case (sum, entry) => - sum + (entry._2.size+100) - } + (subAcks.size * 100) - - class MessageAction { - var id:MessageId = _ - var messageRecord: MessageRecord = null - var enqueues = ListBuffer[QueueEntryRecord]() - var dequeues = ListBuffer[QueueEntryRecord]() - var xaAcks = ListBuffer[XaAckRecord]() - - def uow = DelayableUOW.this - def isEmpty() = messageRecord==null && enqueues.isEmpty && dequeues.isEmpty && xaAcks.isEmpty - - def cancel() = { - uow.rm(id) - } - - def syncNeeded = messageRecord!=null && messageRecord.syncNeeded - def size = (if(messageRecord!=null) messageRecord.data.length+20 else 0) + ((enqueues.size+dequeues.size)*50) + xaAcks.foldLeft(0L){ case (sum, entry) => - sum + 100 - } - - def addToPendingStore() = { - var set = manager.pendingStores.get(id) - if(set==null) { - set = HashSet() - manager.pendingStores.put(id, set) - } - set.add(this) - } - - def removeFromPendingStore() = { - var set = manager.pendingStores.get(id) - if(set!=null) { - set.remove(this) - if(set.isEmpty) { - manager.pendingStores.remove(id) - } - } - } - - } - - def completeAsap() = this.synchronized { disableDelay=true } - def delayable = !disableDelay && delayableActions>0 && manager.flushDelay>0 - - def rm(msg:MessageId) = { - actions -= msg - if( actions.isEmpty && state.stage < UowFlushing.stage ) { - cancel - } - 
} - - def cancel = { - manager.dispatchQueue.assertExecuting() - manager.uowCanceledCounter += 1 - canceled = true - manager.flush_queue.remove(uowId) - onCompleted() - } - - def getAction(id:MessageId) = { - actions.get(id) match { - case Some(x) => x - case None => - val x = new MessageAction - x.id = id - actions += id->x - x - } - } - - def updateAckPosition(sub_key:Long, ack_seq:Long) = { - subAcks += SubAckRecord(sub_key, ack_seq) - } - - def xaAck(record:XaAckRecord) = { - this.synchronized { - getAction(record.ack.getLastMessageId).xaAcks+=record - } - countDownFuture - } - - def enqueue(queueKey:Long, queueSeq:Long, message:Message, delay_enqueue:Boolean) = { - var delay = delay_enqueue && message.getTransactionId==null - if(delay ) { - manager.uowEnqueueDelayReqested += 1 - } else { - manager.uowEnqueueNodelayReqested += 1 - } - - val id = message.getMessageId - - def create_message_record: MessageRecord = { - // encodes body and release object bodies, in case message was sent from - // a VM connection. Releases additional memory. - message.storeContentAndClear() - var packet = manager.parent.wireFormat.marshal(message) - var data = new Buffer(packet.data, packet.offset, packet.length) - if (manager.snappyCompressLogs) { - data = Snappy.compress(data) - } - val record = MessageRecord(manager.parent, id, data, message.isResponseRequired) - id.setDataLocator(record) - record - } - - val messageRecord = id.getDataLocator match { - case null => - create_message_record - case record:MessageRecord => - if( record.store == manager.parent ) { - record - } else { - create_message_record - } - case x:DataLocator => - if( x.store == manager.parent ) { - null - } else { - create_message_record - } - } - - val entry = QueueEntryRecord(id, queueKey, queueSeq) - assert(id.getEntryLocator == null) - id.setEntryLocator(EntryLocator(queueKey, queueSeq)) - - val a = this.synchronized { - if( !delay ) - disableDelay = true - - val action = getAction(entry.id) - action.messageRecord = messageRecord - action.enqueues += entry - delayableActions += 1 - action - } - - manager.dispatchQueue { - manager.cancelable_enqueue_actions.put(key(entry), a) - a.addToPendingStore() - } - countDownFuture - } - - def incrementRedelivery(expectedQueueKey:Long, id:MessageId) = { - if( id.getEntryLocator != null ) { - val EntryLocator(queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[EntryLocator]; - assert(queueKey == expectedQueueKey) - val counter = manager.client.getDeliveryCounter(queueKey, queueSeq) - val entry = QueueEntryRecord(id, queueKey, queueSeq, counter+1) - val a = this.synchronized { - val action = getAction(entry.id) - action.enqueues += entry - delayableActions += 1 - action - } - manager.dispatchQueue { - manager.cancelable_enqueue_actions.put(key(entry), a) - a.addToPendingStore() - } - } - countDownFuture - } - - def dequeue(expectedQueueKey:Long, id:MessageId) = { - if( id.getEntryLocator != null ) { - val EntryLocator(queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[EntryLocator]; - assert(queueKey == expectedQueueKey) - val entry = QueueEntryRecord(id, queueKey, queueSeq) - this.synchronized { - getAction(id).dequeues += entry - } - } - countDownFuture - } - - def complete_asap = this.synchronized { - disableDelay=true - if( state eq UowDelayed ) { - manager.enqueueFlush(this) - } - } - - var complete_listeners = ListBuffer[()=>Unit]() - def addCompleteListener(func: =>Unit) = { - complete_listeners.append( func _ ) - } - - var asyncCapacityUsed = 0L - var disposed_at = 0L - - override 
def dispose = this.synchronized { - state = UowClosed - disposed_at = System.nanoTime() - if( !syncNeeded ) { - val s = size - if( manager.asyncCapacityRemaining.addAndGet(-s) > 0 ) { - asyncCapacityUsed = s - complete_listeners.foreach(_()) - } else { - manager.asyncCapacityRemaining.addAndGet(s) - } - } - // closeSource.merge(this) - manager.dispatchQueue { - manager.processClosed(this) - } - } - - def onCompleted(error:Throwable=null) = this.synchronized { - if ( state.stage < UowCompleted.stage ) { - state = UowCompleted - if( asyncCapacityUsed != 0 ) { - manager.asyncCapacityRemaining.addAndGet(asyncCapacityUsed) - asyncCapacityUsed = 0 - } else { - manager.uow_complete_latency.add(System.nanoTime() - disposed_at) - complete_listeners.foreach(_()) - } - if( error == null ) { - countDownFuture.set(null) - } else { - countDownFuture.failed(error) - } - - for( (id, action) <- actions ) { - if( !action.enqueues.isEmpty ) { - action.removeFromPendingStore() - } - for( queueEntry <- action.enqueues ) { - manager.cancelable_enqueue_actions.remove(key(queueEntry)) - } - } - super.dispose - } - } -} - -/** - *- *
- * - * @author Hiram Chirino - */ -class DBManager(val parent:LevelDBStore) { - - var lastCollectionKey = new AtomicLong(0) - var lastPListKey = new AtomicLong(0) - def client = parent.client - - def writeExecutor = client.writeExecutor - def flushDelay = parent.flushDelay - - val dispatchQueue = createQueue(toString) -// val aggregator = new AggregatingExecutor(dispatchQueue) - - val asyncCapacityRemaining = new AtomicLong(0L) - - def createUow() = new DelayableUOW(this) - - var uowEnqueueDelayReqested = 0L - var uowEnqueueNodelayReqested = 0L - var uowClosedCounter = 0L - var uowCanceledCounter = 0L - var uowStoringCounter = 0L - var uowStoredCounter = 0L - - val uow_complete_latency = TimeMetric() - -// val closeSource = createSource(new ListEventAggregator[DelayableUOW](), dispatchQueue) -// closeSource.setEventHandler(^{ -// closeSource.getData.foreach { uow => -// processClosed(uow) -// } -// }); -// closeSource.resume - - var pendingStores = new ConcurrentHashMap[MessageId, HashSet[DelayableUOW#MessageAction]]() - - var cancelable_enqueue_actions = new HashMap[QueueEntryKey, DelayableUOW#MessageAction]() - - val lastUowId = new AtomicInteger(1) - - var producerSequenceIdTracker = new ActiveMQMessageAuditNoSync - - def getLastProducerSequenceId(id: ProducerId): Long = dispatchQueue.sync { - producerSequenceIdTracker.getLastSeqId(id) - } - - def processClosed(uow:DelayableUOW) = { - dispatchQueue.assertExecuting() - uowClosedCounter += 1 - - // Broker could issue a flush_message call before - // this stage runs.. which make the stage jump over UowDelayed - if( uow.state.stage < UowDelayed.stage ) { - uow.state = UowDelayed - } - if( uow.state.stage < UowFlushing.stage ) { - uow.actions.foreach { case (id, action) => - - // The UoW may have been canceled. - if( action.messageRecord!=null && action.enqueues.isEmpty ) { - action.removeFromPendingStore() - action.messageRecord = null - uow.delayableActions -= 1 - } - if( action.isEmpty ) { - action.cancel() - } - - // dequeues can cancel out previous enqueues - action.dequeues.foreach { entry=> - val entry_key = key(entry) - val prev_action:DelayableUOW#MessageAction = cancelable_enqueue_actions.remove(entry_key) - - if( prev_action!=null ) { - val prev_uow = prev_action.uow - prev_uow.synchronized { - if( !prev_uow.canceled ) { - - prev_uow.delayableActions -= 1 - - // yay we can cancel out a previous enqueue - prev_action.enqueues = prev_action.enqueues.filterNot( x=> key(x) == entry_key ) - if( prev_uow.state.stage >= UowDelayed.stage ) { - - // if the message is not in any queues.. we can gc it.. - if( prev_action.enqueues == Nil && prev_action.messageRecord !=null ) { - prev_action.removeFromPendingStore() - prev_action.messageRecord = null - prev_uow.delayableActions -= 1 - } - - // Cancel the action if it's now empty - if( prev_action.isEmpty ) { - prev_action.cancel() - } else if( !prev_uow.delayable ) { - // flush it if there is no point in delaying anymore - prev_uow.complete_asap - } - } - } - } - // since we canceled out the previous enqueue.. now cancel out the action - action.dequeues = action.dequeues.filterNot( _ == entry) - if( action.isEmpty ) { - action.cancel() - } - } - } - } - } - - if( !uow.canceled && uow.state.stage < UowFlushQueued.stage ) { - if( uow.delayable ) { - // Let the uow get GCed if its' canceled during the delay window.. 
- val ref = new WeakReference[DelayableUOW](uow) - scheduleFlush(ref) - } else { - enqueueFlush(uow) - } - } - } - - private def scheduleFlush(ref: WeakReference[DelayableUOW]) { - dispatchQueue.executeAfter(flushDelay, TimeUnit.MILLISECONDS, ^ { - val uow = ref.get(); - if (uow != null) { - enqueueFlush(uow) - } - }) - } - - val flush_queue = new java.util.LinkedHashMap[Long, DelayableUOW]() - - def enqueueFlush(uow:DelayableUOW) = { - dispatchQueue.assertExecuting() - if( uow!=null && !uow.canceled && uow.state.stage < UowFlushQueued.stage ) { - uow.state = UowFlushQueued - flush_queue.put (uow.uowId, uow) - flushSource.merge(1) - } - } - - val flushSource = createSource(EventAggregators.INTEGER_ADD, dispatchQueue) - flushSource.setEventHandler(^{drainFlushes}); - flushSource.resume - - def drainFlushes:Unit = { - dispatchQueue.assertExecuting() - - // Some UOWs may have been canceled. - import collection.JavaConversions._ - val values = flush_queue.values().toSeq.toArray - flush_queue.clear() - - val uows = values.flatMap { uow=> - if( uow.canceled ) { - None - } else { - // It will not be possible to cancel the UOW anymore.. - uow.state = UowFlushing - uow.actions.foreach { case (_, action) => - action.enqueues.foreach { queue_entry=> - val action = cancelable_enqueue_actions.remove(key(queue_entry)) - assert(action!=null) - } - } - if( !started ) { - uow.onCompleted(new SuppressReplyException("Store stopped")) - None - } else { - Some(uow) - } - } - } - - if( !uows.isEmpty ) { - uowStoringCounter += uows.size - flushSource.suspend - writeExecutor { - val e = try { - client.store(uows) - null - } catch { - case e:Throwable => e - } - flushSource.resume - dispatchQueue { - uowStoredCounter += uows.size - uows.foreach { uow=> - uow.onCompleted(e) - } - } - } - } - } - - var started = false - def snappyCompressLogs = parent.snappyCompressLogs - - def start = { - asyncCapacityRemaining.set(parent.asyncBufferSize) - client.start() - dispatchQueue.sync { - started = true - pollGc - if(parent.monitorStats) { - monitorStats - } - } - } - - def stop() = { - dispatchQueue.sync { - started = false - } - client.stop() - } - - def pollGc:Unit = dispatchQueue.after(10, TimeUnit.SECONDS) { - if( started ) { - val positions = parent.getTopicGCPositions - writeExecutor { - if( started ) { - client.gc(positions) - pollGc - } - } - } - } - - def monitorStats:Unit = dispatchQueue.after(1, TimeUnit.SECONDS) { - if( started ) { - println(("committed: %d, canceled: %d, storing: %d, stored: %d, " + - "uow complete: %,.3f ms, " + - "index write: %,.3f ms, " + - "log write: %,.3f ms, log flush: %,.3f ms, log rotate: %,.3f ms"+ - "add msg: %,.3f ms, add enqueue: %,.3f ms, " + - "uowEnqueueDelayReqested: %d, uowEnqueueNodelayReqested: %d " - ).format( - uowClosedCounter, uowCanceledCounter, uowStoringCounter, uowStoredCounter, - uow_complete_latency.reset, - client.max_index_write_latency.reset, - client.log.max_log_write_latency.reset, client.log.max_log_flush_latency.reset, client.log.max_log_rotate_latency.reset, - client.max_write_message_latency.reset, client.max_write_enqueue_latency.reset, - uowEnqueueDelayReqested, uowEnqueueNodelayReqested - )) - uowClosedCounter = 0 -// uowCanceledCounter = 0 - uowStoringCounter = 0 - uowStoredCounter = 0 - monitorStats - } - } - - ///////////////////////////////////////////////////////////////////// - // - // Implementation of the Store interface - // - ///////////////////////////////////////////////////////////////////// - - def checkpoint(sync:Boolean) = 
writeExecutor.sync { - client.snapshotIndex(sync) - } - - def purge = writeExecutor.sync { - client.purge - lastCollectionKey.set(1) - } - - def getLastQueueEntrySeq(key:Long) = { - client.getLastQueueEntrySeq(key) - } - - def collectionEmpty(key:Long) = writeExecutor.sync { - client.collectionEmpty(key) - } - - def collectionSize(key:Long) = { - client.collectionSize(key) - } - - def collectionIsEmpty(key:Long) = { - client.collectionIsEmpty(key) - } - - def cursorMessages(preparedAcks:java.util.HashSet[MessageId], key:Long, listener:MessageRecoveryListener, startPos:Long, endPos:Long=Long.MaxValue, max:Long=Long.MaxValue) = { - var lastmsgid:MessageId = null - var count = 0L - client.queueCursor(key, startPos, endPos) { msg => - if( !preparedAcks.contains(msg.getMessageId) && listener.recoverMessage(msg) ) { - lastmsgid = msg.getMessageId - count += 1 - } - count < max && listener.canRecoveryNextMessage - } - if( lastmsgid==null ) { - startPos - } else { - lastmsgid.getEntryLocator.asInstanceOf[EntryLocator].seq+1 - } - } - - def getXAActions(key:Long) = { - val msgs = ListBuffer[Message]() - val acks = ListBuffer[XaAckRecord]() - client.transactionCursor(key) { command => - command match { - case message:Message => msgs += message - case record:XaAckRecord => acks += record - } - true - } - (msgs, acks) - } - - def queuePosition(id: MessageId):Long = { - id.getEntryLocator.asInstanceOf[EntryLocator].seq - } - - def createQueueStore(dest:ActiveMQQueue):LevelDBStore#LevelDBMessageStore = { - parent.createQueueMessageStore(dest, createCollection(utf8(dest.getQualifiedName), QUEUE_COLLECTION_TYPE)) - } - def destroyQueueStore(key:Long) = writeExecutor.sync { - client.removeCollection(key) - } - - def getLogAppendPosition = writeExecutor.sync { - client.getLogAppendPosition - } - - def addSubscription(topic_key:Long, info:SubscriptionInfo):DurableSubscription = { - val record = new SubscriptionRecord.Bean - record.setTopicKey(topic_key) - record.setClientId(info.getClientId) - record.setSubscriptionName(info.getSubscriptionName) - if( info.getSelector!=null ) { - record.setSelector(info.getSelector) - } - if( info.getDestination!=null ) { - record.setDestinationName(info.getDestination.getQualifiedName) - } - if ( info.getSubscribedDestination!=null) { - record.setSubscribedDestinationName(info.getSubscribedDestination.getQualifiedName) - } - val collection = new CollectionRecord.Bean() - collection.setType(SUBSCRIPTION_COLLECTION_TYPE) - collection.setKey(lastCollectionKey.incrementAndGet()) - collection.setMeta(record.freeze().toUnframedBuffer) - - val buffer = collection.freeze() - buffer.toFramedBuffer // eager encode the record. - writeExecutor.sync { - client.addCollection(buffer) - } - DurableSubscription(collection.getKey, topic_key, info) - } - - def removeSubscription(sub:DurableSubscription) { - writeExecutor.sync { - client.removeCollection(sub.subKey) - } - } - - def createTopicStore(dest:ActiveMQTopic) = { - var key = createCollection(utf8(dest.getQualifiedName), TOPIC_COLLECTION_TYPE) - parent.createTopicMessageStore(dest, key) - } - - def createCollection(name:Buffer, collectionType:Int) = { - val collection = new CollectionRecord.Bean() - collection.setType(collectionType) - collection.setMeta(name) - collection.setKey(lastCollectionKey.incrementAndGet()) - val buffer = collection.freeze() - buffer.toFramedBuffer // eager encode the record. 
- writeExecutor.sync { - client.addCollection(buffer) - } - collection.getKey - } - - def buffer(packet:ByteSequence) = new Buffer(packet.data, packet.offset, packet.length) - - def createTransactionContainer(id:XATransactionId) = - createCollection(buffer(parent.wireFormat.marshal(id)), TRANSACTION_COLLECTION_TYPE) - - def removeTransactionContainer(key:Long) = writeExecutor.sync { - client.removeCollection(key) - } - - - def loadCollections = { - val collections = writeExecutor.sync { - client.listCollections - } - var last = 0L - collections.foreach { case (key, record) => - last = key - record.getType match { - case QUEUE_COLLECTION_TYPE => - val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.QUEUE_TYPE).asInstanceOf[ActiveMQQueue] - parent.createQueueMessageStore(dest, key) - case TOPIC_COLLECTION_TYPE => - val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.TOPIC_TYPE).asInstanceOf[ActiveMQTopic] - parent.createTopicMessageStore(dest, key) - case SUBSCRIPTION_COLLECTION_TYPE => - val sr = SubscriptionRecord.FACTORY.parseUnframed(record.getMeta) - val info = new SubscriptionInfo - info.setClientId(sr.getClientId) - info.setSubscriptionName(sr.getSubscriptionName) - if( sr.hasSelector ) { - info.setSelector(sr.getSelector) - } - if( sr.hasDestinationName ) { - info.setDestination(ActiveMQDestination.createDestination(sr.getDestinationName, ActiveMQDestination.TOPIC_TYPE)) - } - if( sr.hasSubscribedDestinationName ) { - info.setSubscribedDestination(ActiveMQDestination.createDestination(sr.getSubscribedDestinationName, ActiveMQDestination.TOPIC_TYPE)) - } - - var sub = DurableSubscription(key, sr.getTopicKey, info) - sub.lastAckPosition = client.getAckPosition(key); - sub.gcPosition = sub.lastAckPosition - parent.createSubscription(sub) - case TRANSACTION_COLLECTION_TYPE => - val meta = record.getMeta - val txid = parent.wireFormat.unmarshal(new ByteSequence(meta.data, meta.offset, meta.length)).asInstanceOf[XATransactionId] - val transaction = parent.transaction(txid) - transaction.xacontainer_id = key - case _ => - } - } - lastCollectionKey.set(last) - } - - def createPList(name:String):LevelDBStore#LevelDBPList = { - parent.createPList(name, lastPListKey.incrementAndGet()) - } - - def destroyPList(key:Long) = writeExecutor.sync { - client.removePlist(key) - } - - def plistPut(key:Array[Byte], value:Array[Byte]) = client.plistPut(key, value) - def plistGet(key:Array[Byte]) = client.plistGet(key) - def plistDelete(key:Array[Byte]) = client.plistDelete(key) - def plistIterator = client.plistIterator - - def getMessage(x: MessageId):Message = { - val id = Option(pendingStores.get(x)).flatMap(_.headOption).map(_.id).getOrElse(x) - val locator = id.getDataLocator() - val msg = client.getMessage(locator) - if( msg!=null ) { - msg.setMessageId(id) - } else { - LevelDBStore.warn("Could not load messages for: "+x+" at: "+locator) - } - msg - } - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala deleted file mode 100755 index b875fe0197..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala +++ /dev/null @@ -1,1721 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb - -import java.{lang=>jl} -import java.{util=>ju} - -import java.util.concurrent.locks.ReentrantReadWriteLock -import collection.immutable.TreeMap -import collection.mutable.{HashMap, ListBuffer} -import org.iq80.leveldb._ - -import org.fusesource.hawtdispatch._ -import record.{CollectionKey, EntryKey, EntryRecord, CollectionRecord} -import org.apache.activemq.leveldb.util._ -import java.util.concurrent._ -import org.fusesource.hawtbuf._ -import java.io._ -import scala.Option._ -import org.apache.activemq.command.{MessageAck, Message} -import org.apache.activemq.util.{IOExceptionSupport, ByteSequence} -import java.text.SimpleDateFormat -import java.util.{Date, Collections} -import org.fusesource.leveldbjni.internal.JniDB -import org.apache.activemq.ActiveMQMessageAuditNoSync -import org.apache.activemq.leveldb.util.TimeMetric -import org.fusesource.hawtbuf.ByteArrayInputStream -import org.apache.activemq.leveldb.RecordLog.LogInfo -import scala.Some -import scala.Serializable -import org.fusesource.hawtbuf.ByteArrayOutputStream -import org.apache.activemq.broker.SuppressReplyException - -/** - * @author Hiram Chirino - */ -object LevelDBClient extends Log { - - class WriteThread(r:Runnable) extends Thread(r) { - setDaemon(true) - } - - final val STORE_SCHEMA_PREFIX = "activemq_leveldb_store:" - final val STORE_SCHEMA_VERSION = 1 - - final val THREAD_POOL_STACK_SIZE = System.getProperty("leveldb.thread.stack.size", "" + 1024 * 512).toLong - final val THREAD_POOL: ThreadPoolExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 10, TimeUnit.SECONDS, new SynchronousQueue[Runnable], new ThreadFactory { - def newThread(r: Runnable): Thread = { - var rc: Thread = new Thread(null, r, "LevelDB Store Task", THREAD_POOL_STACK_SIZE) - rc.setDaemon(true) - return rc - } - }) { - override def shutdown: Unit = {} - override def shutdownNow = Collections.emptyList[Runnable] - } - - val PLIST_WRITE_OPTIONS = new WriteOptions().sync(false) - - final val DIRTY_INDEX_KEY = bytes(":dirty") - final val LOG_REF_INDEX_KEY = bytes(":log-refs") - final val LOGS_INDEX_KEY = bytes(":logs") - final val PRODUCER_IDS_INDEX_KEY = bytes(":producer_ids") - - final val COLLECTION_META_KEY = bytes(":collection-meta") - final val TRUE = bytes("true") - final val FALSE = bytes("false") - final val ACK_POSITION = new AsciiBuffer("p") - - final val COLLECTION_PREFIX = 'c'.toByte - final val COLLECTION_PREFIX_ARRAY = Array(COLLECTION_PREFIX) - final val ENTRY_PREFIX = 'e'.toByte - final val ENTRY_PREFIX_ARRAY = Array(ENTRY_PREFIX) - - final val LOG_ADD_COLLECTION = 1.toByte - final val LOG_REMOVE_COLLECTION = 2.toByte - final val LOG_ADD_ENTRY = 3.toByte - final val LOG_REMOVE_ENTRY = 4.toByte - final val LOG_DATA = 5.toByte - final val LOG_TRACE = 6.toByte - final val LOG_UPDATE_ENTRY = 7.toByte - - 
final val LOG_SUFFIX = ".log" - final val INDEX_SUFFIX = ".index" - - implicit def toByteArray(buffer:Buffer) = buffer.toByteArray - implicit def toBuffer(buffer:Array[Byte]) = new Buffer(buffer) - - def encodeCollectionRecord(v: CollectionRecord.Buffer) = v.toUnframedByteArray - def decodeCollectionRecord(data: Buffer):CollectionRecord.Buffer = CollectionRecord.FACTORY.parseUnframed(data) - def encodeCollectionKeyRecord(v: CollectionKey.Buffer) = v.toUnframedByteArray - def decodeCollectionKeyRecord(data: Buffer):CollectionKey.Buffer = CollectionKey.FACTORY.parseUnframed(data) - - def encodeEntryRecord(v: EntryRecord.Buffer) = v.toUnframedBuffer - def decodeEntryRecord(data: Buffer):EntryRecord.Buffer = EntryRecord.FACTORY.parseUnframed(data) - - def encodeEntryKeyRecord(v: EntryKey.Buffer) = v.toUnframedByteArray - def decodeEntryKeyRecord(data: Buffer):EntryKey.Buffer = EntryKey.FACTORY.parseUnframed(data) - - def encodeLocator(pos:Long, len:Int):Array[Byte] = { - val out = new DataByteArrayOutputStream( - AbstractVarIntSupport.computeVarLongSize(pos)+ - AbstractVarIntSupport.computeVarIntSize(len) - ) - out.writeVarLong(pos) - out.writeVarInt(len) - out.getData - } - def decodeLocator(bytes:Buffer):(Long, Int) = { - val in = new DataByteArrayInputStream(bytes) - (in.readVarLong(), in.readVarInt()) - } - def decodeLocator(bytes:Array[Byte]):(Long, Int) = { - val in = new DataByteArrayInputStream(bytes) - (in.readVarLong(), in.readVarInt()) - } - - def encodeLongLong(a1:Long, a2:Long) = { - val out = new DataByteArrayOutputStream(8) - out.writeLong(a1) - out.writeLong(a2) - out.toBuffer - } - - def decodeLongLong(bytes:Array[Byte]):(Long, Long) = { - val in = new DataByteArrayInputStream(bytes) - (in.readLong(), in.readLong()) - } - - def encodeLong(a1:Long) = { - val out = new DataByteArrayOutputStream(8) - out.writeLong(a1) - out.toBuffer - } - - def encodeVLong(a1:Long):Array[Byte] = { - val out = new DataByteArrayOutputStream( - AbstractVarIntSupport.computeVarLongSize(a1) - ) - out.writeVarLong(a1) - out.getData - } - - def decodeVLong(bytes:Array[Byte]):Long = { - val in = new DataByteArrayInputStream(bytes) - in.readVarLong() - } - - def encodeLongKey(a1:Byte, a2:Long):Array[Byte] = { - val out = new DataByteArrayOutputStream(9) - out.writeByte(a1.toInt) - out.writeLong(a2) - out.getData - } - def decodeLongKey(bytes:Array[Byte]):(Byte, Long) = { - val in = new DataByteArrayInputStream(bytes) - (in.readByte(), in.readLong()) - } - - def decodeLong(bytes:Buffer):Long = { - val in = new DataByteArrayInputStream(bytes) - in.readLong() - } - def decodeLong(bytes:Array[Byte]):Long = { - val in = new DataByteArrayInputStream(bytes) - in.readLong() - } - - def encodeEntryKey(a1:Byte, a2:Long, a3:Long):Array[Byte] = { - val out = new DataByteArrayOutputStream(17) - out.writeByte(a1.toInt) - out.writeLong(a2) - out.writeLong(a3) - out.getData - } - - def encodeEntryKey(a1:Byte, a2:Long, a3:Buffer):Array[Byte] = { - val out = new DataByteArrayOutputStream(9+a3.length) - out.writeByte(a1.toInt) - out.writeLong(a2) - out.write(a3) - out.getData - } - - def decodeEntryKey(bytes:Array[Byte]):(Byte, Long, Buffer) = { - val in = new DataByteArrayInputStream(bytes) - (in.readByte(), in.readLong(), in.readBuffer(in.available())) - } - - final class RichDB(val db: DB) { - - val isPureJavaVersion = db.getClass.getName == "org.iq80.leveldb.impl.DbImpl" - - def getProperty(name:String) = db.getProperty(name) - - def getApproximateSizes(ranges:Range*) = db.getApproximateSizes(ranges:_*) - - def 
get(key:Array[Byte], ro:ReadOptions=new ReadOptions):Option[Array[Byte]] = { - Option(db.get(key, ro)) - } - - def close:Unit = db.close() - - def delete(key:Array[Byte], wo:WriteOptions=new WriteOptions):Unit = { - db.delete(key, wo) - } - - def put(key:Array[Byte], value:Array[Byte], wo:WriteOptions=new WriteOptions):Unit = { - db.put(key, value, wo) - } - - def write[T](wo:WriteOptions=new WriteOptions, max_write_latency:TimeMetric = TimeMetric())(func: WriteBatch=>T):T = { - val updates = db.createWriteBatch() - try { - val rc=Some(func(updates)) - max_write_latency { - db.write(updates, wo) - } - return rc.get - } finally { - updates.close(); - } - } - - def store[T](write:WriteBatch, wo:WriteOptions=new WriteOptions) = { - db.write(write, wo) - } - - def snapshot[T](func: Snapshot=>T):T = { - val snapshot = db.getSnapshot - try { - func(snapshot) - } finally { - snapshot.close() - } - } - - def cursorKeys(ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { - val iterator = db.iterator(ro) - iterator.seekToFirst(); - try { - while( iterator.hasNext && func(iterator.peekNext.getKey) ) { - iterator.next() - } - } finally { - iterator.close(); - } - } - - def cursorKeysPrefixed(prefix:Array[Byte], ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { - val iterator = db.iterator(ro) - might_trigger_compaction(iterator.seek(prefix)); - try { - def check(key:Buffer) = { - key.startsWith(prefix) && func(key) - } - while( iterator.hasNext && check(iterator.peekNext.getKey) ) { - iterator.next() - } - } finally { - iterator.close(); - } - } - - def cursorPrefixed(prefix:Array[Byte], ro:ReadOptions=new ReadOptions)(func: (Array[Byte],Array[Byte]) => Boolean): Unit = { - val iterator = db.iterator(ro) - might_trigger_compaction(iterator.seek(prefix)); - try { - def check(key:Buffer) = { - key.startsWith(prefix) && func(key, iterator.peekNext.getValue) - } - while( iterator.hasNext && check(iterator.peekNext.getKey) ) { - iterator.next() - } - } finally { - iterator.close(); - } - } - - def compare(a1:Array[Byte], a2:Array[Byte]):Int = { - new Buffer(a1).compareTo(new Buffer(a2)) - } - - def cursorRangeKeys(startIncluded:Array[Byte], endExcluded:Array[Byte], ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { - val iterator = db.iterator(ro) - might_trigger_compaction(iterator.seek(startIncluded)); - try { - def check(key:Array[Byte]) = { - if ( compare(key,endExcluded) < 0) { - func(key) - } else { - false - } - } - while( iterator.hasNext && check(iterator.peekNext.getKey) ) { - iterator.next() - } - } finally { - iterator.close(); - } - } - - def cursorRange(startIncluded:Array[Byte], endExcluded:Array[Byte], ro:ReadOptions=new ReadOptions)(func: (Array[Byte],Array[Byte]) => Boolean): Unit = { - val iterator = db.iterator(ro) - might_trigger_compaction(iterator.seek(startIncluded)); - try { - def check(key:Array[Byte]) = { - (compare(key,endExcluded) < 0) && func(key, iterator.peekNext.getValue) - } - while( iterator.hasNext && check(iterator.peekNext.getKey) ) { - iterator.next() - } - } finally { - iterator.close(); - } - } - - def lastKey(prefix:Array[Byte], ro:ReadOptions=new ReadOptions): Option[Array[Byte]] = { - val last = new Buffer(prefix).deepCopy().data - if ( last.length > 0 ) { - val pos = last.length-1 - last(pos) = (last(pos)+1).toByte - } - - if(isPureJavaVersion) { - // The pure java version of LevelDB does not support backward iteration. 
- var rc:Option[Array[Byte]] = None - cursorRangeKeys(prefix, last) { key=> - rc = Some(key) - true - } - rc - } else { - val iterator = db.iterator(ro) - try { - - might_trigger_compaction(iterator.seek(last)); - if ( iterator.hasPrev ) { - iterator.prev() - } else { - iterator.seekToLast() - } - - if ( iterator.hasNext ) { - val key:Buffer = iterator.peekNext.getKey - if(key.startsWith(prefix)) { - Some(key) - } else { - None - } - } else { - None - } - } finally { - iterator.close(); - } - } - } - - def compact = { - compact_needed = false - db match { - case db:JniDB => - db.compactRange(null, null) -// case db:DbImpl => -// val start = new Slice(Array[Byte]('a'.toByte)) -// val end = new Slice(Array[Byte]('z'.toByte)) -// db.compactRange(2, start, end) - case _ => - } - } - - private def might_trigger_compaction[T](func: => T): T = { - val start = System.nanoTime() - try { - func - } finally { - val duration = System.nanoTime() - start - // If it takes longer than 100 ms.. - if( duration > 1000000*100 ) { - compact_needed = true - } - } - } - - @volatile - var compact_needed = false - } - - - def bytes(value:String) = value.getBytes("UTF-8") - - import FileSupport._ - def create_sequence_file(directory:File, id:Long, suffix:String) = directory / ("%016x%s".format(id, suffix)) - - def find_sequence_files(directory:File, suffix:String):TreeMap[Long, File] = { - TreeMap((directory.list_files.flatMap { f=> - if( f.getName.endsWith(suffix) ) { - try { - val base = f.getName.stripSuffix(suffix) - val position = java.lang.Long.parseLong(base, 16); - Some(position -> f) - } catch { - case e:NumberFormatException => None - } - } else { - None - } - }): _* ) - } - - class CollectionMeta extends Serializable { - var size = 0L - var last_key:Array[Byte] = _ - } - - def copyIndex(from:File, to:File) = { - for( file <- from.list_files ) { - val name: String = file.getName - if( name.endsWith(".sst") ) { - // SST files don't change once created, safe to hard link. - file.linkTo(to / name) - } else if(name == "LOCK") { - // No need to copy the lock file. - } else { - /// These might not be append only files, so avoid hard linking just to be safe. 
- file.copyTo(to / name) - } - } - } -} - - -/** - * - * @author Hiram Chirino - */ -class LevelDBClient(store: LevelDBStore) { - - import LevelDBClient._ - import FileSupport._ - - val dispatchQueue = createQueue("leveldb") - - ///////////////////////////////////////////////////////////////////// - // - // Helpers - // - ///////////////////////////////////////////////////////////////////// - - def directory = store.directory - def logDirectory = Option(store.logDirectory).getOrElse(store.directory) - - ///////////////////////////////////////////////////////////////////// - // - // Public interface used by the DBManager - // - ///////////////////////////////////////////////////////////////////// - - def sync = store.sync; - def verifyChecksums = store.verifyChecksums - - var log:RecordLog = _ - - var index:RichDB = _ - var plist:RichDB = _ - var indexOptions:Options = _ - - var lastIndexSnapshotPos:Long = _ - val snapshotRwLock = new ReentrantReadWriteLock(true) - - var factory:DBFactory = _ - val logRefs = HashMap[Long, LongCounter]() - var recoveryLogs:java.util.TreeMap[Long, Void] = _ - - val collectionMeta = HashMap[Long, CollectionMeta]() - - def plistIndexFile = directory / ("plist"+INDEX_SUFFIX) - def dirtyIndexFile = directory / ("dirty"+INDEX_SUFFIX) - def tempIndexFile = directory / ("temp"+INDEX_SUFFIX) - def snapshotIndexFile(id:Long) = create_sequence_file(directory,id, INDEX_SUFFIX) - - def size: Long = logRefs.size * store.logSize - - def createLog: RecordLog = { - new RecordLog(logDirectory, LOG_SUFFIX) - } - - var writeExecutor:ExecutorService = _ - - def writeExecutorExec(func: =>Unit ) = writeExecutor { - func - } - - def storeTrace(ascii:String, force:Boolean=false) = { - assert_write_thread_executing - val time = new SimpleDateFormat("dd/MMM/yyyy:HH:mm::ss Z").format(new Date) - log.appender { appender => - appender.append(LOG_TRACE, new AsciiBuffer("%s: %s".format(time, ascii))) - if( force ) { - appender.force - } - } - } - - def might_fail[T](func : =>T):T = { - def handleFailure(e:IOException) = { - var failure:Throwable = e; - if( store.broker_service !=null ) { - // This should start stopping the broker but it might block, - // so do it on another thread... - new Thread("LevelDB IOException handler.") { - override def run() { - try { - store.broker_service.handleIOException(e) - } catch { - case e:RuntimeException => - failure = e - } finally { - store.stop() - } - } - }.start() - // Lets wait until the broker service has started stopping. Once the - // stopping flag is raised, errors caused by stopping the store should - // not get propagated to the client. - while( !store.broker_service.isStopping ) { - Thread.sleep(100); - } - } - throw new SuppressReplyException(failure); - } - try { - func - } catch { - case e:IOException => handleFailure(e) - case e:Throwable => handleFailure(IOExceptionSupport.create(e)) - } - } - - def start() = { - init() - replay_init() - might_fail { - log.open() - } - - var startPosition = lastIndexSnapshotPos; - // if we cannot locate a log for a snapshot, replay from - // first entry of first available log - if (log.log_info(startPosition).isEmpty) { - if (!log.log_infos.isEmpty) { - startPosition = log.log_infos.firstKey(); - } - } - - replay_from(startPosition, log.appender_limit) - replay_write_batch = null; - } - - def assert_write_thread_executing = assert(Thread.currentThread().getClass == classOf[WriteThread]) - - def init() ={ - - // Lets check store compatibility... 
- directory.mkdirs() - val version_file = directory / "store-version.txt" - if (version_file.exists()) { - val ver = try { - var tmp: String = version_file.readText().trim() - if (tmp.startsWith(STORE_SCHEMA_PREFIX)) { - tmp.stripPrefix(STORE_SCHEMA_PREFIX).toInt - } else { - -1 - } - } catch { - case e:Throwable => throw new Exception("Unexpected version file format: " + version_file) - } - ver match { - case STORE_SCHEMA_VERSION => // All is good. - case _ => throw new Exception("Cannot open the store. It's schema version is not supported.") - } - } - version_file.writeText(STORE_SCHEMA_PREFIX + STORE_SCHEMA_VERSION) - - writeExecutor = Executors.newFixedThreadPool(1, new ThreadFactory() { - def newThread(r: Runnable) = new WriteThread(r) - }) - - val factoryNames = store.indexFactory - factory = factoryNames.split("""(,|\s)+""").map(_.trim()).flatMap { name=> - try { - Some(this.getClass.getClassLoader.loadClass(name).newInstance().asInstanceOf[DBFactory]) - } catch { - case e:Throwable => - debug("Could not load factory: "+name+" due to: "+e) - None - } - }.headOption.getOrElse(throw new Exception("Could not load any of the index factory classes: "+factoryNames)) - - if( factory.getClass.getName == "org.iq80.leveldb.impl.Iq80DBFactory") { - info("Using the pure java LevelDB implementation.") - } - if( factory.getClass.getName == "org.fusesource.leveldbjni.JniDBFactory") { - info("Using the JNI LevelDB implementation.") - } - - indexOptions = new Options(); - indexOptions.createIfMissing(true); - - indexOptions.maxOpenFiles(store.indexMaxOpenFiles) - indexOptions.blockRestartInterval(store.indexBlockRestartInterval) - indexOptions.paranoidChecks(store.paranoidChecks) - indexOptions.writeBufferSize(store.indexWriteBufferSize) - indexOptions.blockSize(store.indexBlockSize) - indexOptions.compressionType( store.indexCompression.toLowerCase match { - case "snappy" => CompressionType.SNAPPY - case "none" => CompressionType.NONE - case _ => CompressionType.SNAPPY - }) - - indexOptions.cacheSize(store.indexCacheSize) - indexOptions.logger(new Logger() { - val LOG = Log(factory.getClass.getName) - def log(msg: String) = LOG.debug("index: "+msg.stripSuffix("\n")) - }) - - log = createLog - log.logSize = store.logSize - log.on_log_rotate = ()=> { - post_log_rotate - } - } - - def post_log_rotate ={ - // We snapshot the index every time we rotate the logs. - writeExecutor { - snapshotIndex(false) - } - } - - def replay_init() = { - // Find out what was the last snapshot. - val snapshots = find_sequence_files(directory, INDEX_SUFFIX) - var lastSnapshotIndex = snapshots.lastOption - lastIndexSnapshotPos = lastSnapshotIndex.map(_._1).getOrElse(0) - - // Only keep the last snapshot.. - snapshots.filterNot(_._1 == lastIndexSnapshotPos).foreach( _._2.recursiveDelete ) - tempIndexFile.recursiveDelete - - might_fail { - // Setup the plist index. 
- plistIndexFile.recursiveDelete - plistIndexFile.mkdirs() - plist = new RichDB(factory.open(plistIndexFile, indexOptions)); - - // Delete the dirty indexes - dirtyIndexFile.recursiveDelete - dirtyIndexFile.mkdirs() - - for( (id, file)<- lastSnapshotIndex ) { - try { - copyIndex(file, dirtyIndexFile) - debug("Recovering from last index snapshot at: "+dirtyIndexFile) - } catch { - case e:Exception => - warn(e, "Could not recover snapshot of the index: "+e) - lastSnapshotIndex = None - } - } - index = new RichDB(factory.open(dirtyIndexFile, indexOptions)); - for(value <- index.get(DIRTY_INDEX_KEY) ) { - if( java.util.Arrays.equals(value, TRUE) ) { - warn("Recovering from a dirty index.") - } - } - index.put(DIRTY_INDEX_KEY, TRUE) - loadCounters - } - } - - var replay_write_batch: WriteBatch = null - var indexRecoveryPosition = 0L - - def replay_from(from:Long, limit:Long, print_progress:Boolean=true) = { - debug("Replay of journal from: %d to %d.", from, limit) - if( replay_write_batch==null ) { - replay_write_batch = index.db.createWriteBatch() - } - might_fail { - try { - // Update the index /w what was stored on the logs.. - indexRecoveryPosition = from; - var last_reported_at = System.currentTimeMillis(); - var showing_progress = false - var last_reported_pos = 0L - try { - while (indexRecoveryPosition < limit) { - - if( print_progress ) { - val now = System.currentTimeMillis(); - if( now > last_reported_at+1000 ) { - val at = indexRecoveryPosition-from - val total = limit-from - val rate = (indexRecoveryPosition-last_reported_pos)*1000.0 / (now - last_reported_at) - val eta = (total-at)/rate - val remaining = if(eta > 60*60) { - "%.2f hrs".format(eta/(60*60)) - } else if(eta > 60) { - "%.2f mins".format(eta/60) - } else { - "%.0f secs".format(eta) - } - - System.out.print("Replaying recovery log: %f%% done (%,d/%,d bytes) @ %,.2f kb/s, %s remaining. \r".format( - at*100.0/total, at, total, rate/1024, remaining)) - showing_progress = true; - last_reported_at = now - last_reported_pos = indexRecoveryPosition - } - } - - - log.read(indexRecoveryPosition).map { - case (kind, data, nextPos) => - kind match { - case LOG_DATA => - val message = decodeMessage(data) - store.db.producerSequenceIdTracker.isDuplicate(message.getMessageId) - trace("Replay of LOG_DATA at %d, message id: ", indexRecoveryPosition, message.getMessageId) - - case LOG_ADD_COLLECTION => - val record= decodeCollectionRecord(data) - replay_write_batch.put(encodeLongKey(COLLECTION_PREFIX, record.getKey), data) - collectionMeta.put(record.getKey, new CollectionMeta) - trace("Replay of LOG_ADD_COLLECTION at %d, collection: %s", indexRecoveryPosition, record.getKey) - - case LOG_REMOVE_COLLECTION => - val record = decodeCollectionKeyRecord(data) - // Delete the entries in the collection. 
- index.cursorPrefixed(encodeLongKey(ENTRY_PREFIX, record.getKey), new ReadOptions) { (key, value)=> - val record = decodeEntryRecord(value) - val pos = if ( record.hasValueLocation ) { - Some(record.getValueLocation) - } else { - None - } - pos.foreach(logRefDecrement(_)) - index.delete(key) - true - } - index.delete(data) - collectionMeta.remove(record.getKey) - trace("Replay of LOG_REMOVE_COLLECTION at %d, collection: %s", indexRecoveryPosition, record.getKey) - - case LOG_ADD_ENTRY | LOG_UPDATE_ENTRY => - val record = decodeEntryRecord(data) - - val index_record = new EntryRecord.Bean() - index_record.setValueLocation(record.getValueLocation) - if( record.hasValueLength ) { - index_record.setValueLength(record.getValueLength) - } - val index_value = encodeEntryRecord(index_record.freeze()).toByteArray - - replay_write_batch.put(encodeEntryKey(ENTRY_PREFIX, record.getCollectionKey, record.getEntryKey), index_value) - - if( kind==LOG_ADD_ENTRY ) { - logRefIncrement(record.getValueLocation) - collectionIncrementSize(record.getCollectionKey, record.getEntryKey.toByteArray) - trace("Replay of LOG_ADD_ENTRY at %d, collection: %s, entry: %s", indexRecoveryPosition, record.getCollectionKey, record.getEntryKey) - } else { - trace("Replay of LOG_UPDATE_ENTRY at %d, collection: %s, entry: %s", indexRecoveryPosition, record.getCollectionKey, record.getEntryKey) - } - - case LOG_REMOVE_ENTRY => - val record = decodeEntryRecord(data) - - // Figure out which log file this message reference is pointing at.. - if ( record.hasValueLocation ) { - logRefDecrement(record.getValueLocation) - } - - replay_write_batch.delete(encodeEntryKey(ENTRY_PREFIX, record.getCollectionKey, record.getEntryKey)) - collectionDecrementSize( record.getCollectionKey) - trace("Replay of LOG_REMOVE_ENTRY collection: %s, entry: %s", indexRecoveryPosition, record.getCollectionKey, record.getEntryKey) - - case LOG_TRACE => - trace("Replay of LOG_TRACE, message: %s", indexRecoveryPosition, data.ascii()) - case RecordLog.UOW_END_RECORD => - trace("Replay of UOW_END_RECORD") - index.db.write(replay_write_batch) - replay_write_batch=index.db.createWriteBatch() - case kind => // Skip other records, they don't modify the index. - trace("Skipping replay of %d record kind at %d", kind, indexRecoveryPosition) - - } - indexRecoveryPosition = nextPos - } - } - } - catch { - case e:Throwable => e.printStackTrace() - } - if(showing_progress) { - System.out.println("Replaying recovery log: 100% done "); - } - - } catch { - case e:Throwable => - // replay failed.. good thing we are in a retry block... 
- index.close - replay_write_batch = null - throw e; - } finally { - recoveryLogs = null - debug("Replay end") - } - } - } - - private def logRefDecrement(pos: Long) { - for( key <- logRefKey(pos) ) { - logRefs.get(key) match { - case Some(counter) => counter.decrementAndGet() == 0 - case None => warn("invalid: logRefDecrement: "+pos) - } - } - } - - private def logRefIncrement(pos: Long) { - for( key <- logRefKey(pos) ) { - logRefs.getOrElseUpdate(key, new LongCounter(0)).incrementAndGet() - } - } - - def logRefKey(pos: Long, log_info: RecordLog.LogInfo=null): Option[Long] = { - if( log_info!=null ) { - Some(log_info.position) - } else { - val rc = if( recoveryLogs !=null ) { - Option(recoveryLogs.floorKey(pos)) - } else { - log.log_info(pos).map(_.position) - } - if( !rc.isDefined ) { - warn("Invalid log position: " + pos) - } - rc - } - } - - private def collectionDecrementSize(key: Long) { - collectionMeta.get(key).foreach(_.size -= 1) - } - private def collectionIncrementSize(key: Long, last_key:Array[Byte]) { - collectionMeta.get(key).foreach{ x=> - x.size += 1 - x.last_key = last_key - } - } - - private def storeCounters = { - def storeMap[T <: AnyRef](key:Array[Byte], map:HashMap[Long, T]) { - val baos = new ByteArrayOutputStream() - val os = new ObjectOutputStream(baos); - os.writeInt(map.size); - for( (k,v) <- map ) { - os.writeLong(k) - os.writeObject(v) - } - os.close() - try { - index.put(key, baos.toByteArray) - } - catch { - case e : Throwable => throw e - } - } - def storeList[T <: AnyRef](key:Array[Byte], list:Array[Long]) { - val baos = new ByteArrayOutputStream() - val os = new ObjectOutputStream(baos); - os.writeInt(list.size); - for( k <- list ) { - os.writeLong(k) - } - os.close() - try { - index.put(key, baos.toByteArray) - } - catch { - case e : Throwable => throw e - } - } - def storeObject(key:Array[Byte], o:Object) = { - val baos = new ByteArrayOutputStream() - val os = new ObjectOutputStream(baos); - os.writeObject(o) - os.close() - index.put(key, baos.toByteArray) - } - - storeMap(LOG_REF_INDEX_KEY, logRefs) - storeMap(COLLECTION_META_KEY, collectionMeta) - storeList(LOGS_INDEX_KEY, log.log_file_positions) - storeObject(PRODUCER_IDS_INDEX_KEY, store.db.producerSequenceIdTracker) - - } - - private def loadCounters = { - def loadMap[T <: AnyRef](key:Array[Byte], map:HashMap[Long, T]) { - map.clear() - index.get(key, new ReadOptions).foreach { value=> - val bais = new ByteArrayInputStream(value) - val is = new ObjectInputStream(bais); - var remaining = is.readInt() - while(remaining > 0 ) { - map.put(is.readLong(), is.readObject().asInstanceOf[T]) - remaining-=1 - } - } - } - def loadList[T <: AnyRef](key:Array[Byte]) = { - index.get(key, new ReadOptions).map { value=> - val rc = ListBuffer[Long]() - val bais = new ByteArrayInputStream(value) - val is = new ObjectInputStream(bais); - var remaining = is.readInt() - while(remaining > 0 ) { - rc.append(is.readLong()) - remaining-=1 - } - rc - } - } - def loadObject(key:Array[Byte]) = { - index.get(key, new ReadOptions).map { value=> - val bais = new ByteArrayInputStream(value) - val is = new ObjectInputStream(bais); - is.readObject(); - } - } - - loadMap(LOG_REF_INDEX_KEY, logRefs) - loadMap(COLLECTION_META_KEY, collectionMeta) - for( list <- loadList(LOGS_INDEX_KEY) ) { - recoveryLogs = new java.util.TreeMap[Long, Void]() - for( k <- list ) { - recoveryLogs.put(k, null) - } - } - for( audit <- loadObject(PRODUCER_IDS_INDEX_KEY) ) { - store.db.producerSequenceIdTracker = audit.asInstanceOf[ActiveMQMessageAuditNoSync] 
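// A minimal, self-contained sketch of the persistence format used by storeCounters /
// loadCounters above: an int entry count, then a writeLong(key) / writeObject(value)
// pair per entry. SerCounter and CounterCodecSketch are illustrative names only, not
// part of the store.
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream, Serializable}
import scala.collection.mutable.HashMap

class SerCounter(var count: Long) extends Serializable

object CounterCodecSketch {
  // Encode a map the same way storeMap does: size, then (key, value) pairs.
  def encode(map: HashMap[Long, SerCounter]): Array[Byte] = {
    val baos = new ByteArrayOutputStream()
    val os = new ObjectOutputStream(baos)
    os.writeInt(map.size)
    for ((k, v) <- map) { os.writeLong(k); os.writeObject(v) }
    os.close()
    baos.toByteArray
  }

  // Decode it back, mirroring loadMap.
  def decode(bytes: Array[Byte]): HashMap[Long, SerCounter] = {
    val map = HashMap[Long, SerCounter]()
    val is = new ObjectInputStream(new ByteArrayInputStream(bytes))
    var remaining = is.readInt()
    while (remaining > 0) {
      map.put(is.readLong(), is.readObject().asInstanceOf[SerCounter])
      remaining -= 1
    }
    map
  }

  def main(args: Array[String]): Unit = {
    val refs = HashMap(1L -> new SerCounter(3), 2L -> new SerCounter(1))
    val copy = decode(encode(refs))
    copy.foreach { case (k, v) => println(s"$k -> ${v.count}") } // 1 -> 3, 2 -> 1
  }
}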
- } - } - - - var stored_wal_append_position = 0L - - def wal_append_position = this.synchronized { - if (log!=null && log.isOpen) { - log.appender_limit - } else { - stored_wal_append_position - } - } - - def dirty_stop = this.synchronized { - def ingorefailure(func: =>Unit) = try { func } catch { case e:Throwable=> } - ingorefailure(index.close) - ingorefailure(log.close) - ingorefailure(plist.close) - ingorefailure(might_fail(throw new IOException("non-clean close"))) - } - - def stop():Unit = { - var executorToShutdown:ExecutorService = null - this synchronized { - if (writeExecutor != null) { - executorToShutdown = writeExecutor - writeExecutor = null - } - } - - if (executorToShutdown != null) { - executorToShutdown.shutdown - executorToShutdown.awaitTermination(60, TimeUnit.SECONDS) - - // this blocks until all io completes.. - snapshotRwLock.writeLock().lock() - try { - // Suspend also deletes the index. - if( index!=null ) { - storeCounters - index.put(DIRTY_INDEX_KEY, FALSE, new WriteOptions().sync(true)) - index.close - index = null - debug("Gracefuly closed the index") - copyDirtyIndexToSnapshot - } - this synchronized { - if (log!=null && log.isOpen) { - log.close - stored_wal_append_position = log.appender_limit - log = null - } - } - if( plist!=null ) { - plist.close - plist=null - } - } finally { - snapshotRwLock.writeLock().unlock() - } - } - } - - def usingIndex[T](func: =>T):T = { - val lock = snapshotRwLock.readLock(); - lock.lock() - try { - func - } finally { - lock.unlock() - } - } - - def might_fail_using_index[T](func: =>T):T = might_fail(usingIndex( func )) - - /** - * TODO: expose this via management APIs, handy if you want to - * do a file system level snapshot and want the data to be consistent. - */ - def suspend() = { - // Make sure we are the only ones accessing the index. since - // we will be closing it to create a consistent snapshot. - snapshotRwLock.writeLock().lock() - - storeCounters - index.put(DIRTY_INDEX_KEY, FALSE, new WriteOptions().sync(true)) - // Suspend the index so that it's files are not changed async on us. - index.db.suspendCompactions() - } - - /** - * TODO: expose this via management APIs, handy if you want to - * do a file system level snapshot and want the data to be consistent. - */ - def resume() = { - // re=open it.. - index.db.resumeCompactions() - snapshotRwLock.writeLock().unlock() - } - - def nextIndexSnapshotPos:Long = wal_append_position - - def copyDirtyIndexToSnapshot:Unit = { - if( nextIndexSnapshotPos == lastIndexSnapshotPos ) { - // no need to snapshot again... - return - } - copyDirtyIndexToSnapshot(nextIndexSnapshotPos) - } - - def copyDirtyIndexToSnapshot(walPosition:Long):Unit = { - debug("Taking a snapshot of the current index: "+snapshotIndexFile(walPosition)) - // Where we start copying files into. Delete this on - // restart. - val tmpDir = tempIndexFile - tmpDir.mkdirs() - - try { - - // Copy the index to the tmp dir. - copyIndex(dirtyIndexFile, tmpDir) - - // Rename to signal that the snapshot is complete. - tmpDir.renameTo(snapshotIndexFile(walPosition)) - replaceLatestSnapshotDirectory(walPosition) - - } catch { - case e: Exception => - // if we could not snapshot for any reason, delete it as we don't - // want a partial check point.. 
- warn(e, "Could not snapshot the index: " + e) - tmpDir.recursiveDelete - } - } - - def replaceLatestSnapshotDirectory(newSnapshotIndexPos: Long) { - snapshotIndexFile(lastIndexSnapshotPos).recursiveDelete - lastIndexSnapshotPos = newSnapshotIndexPos - } - - def snapshotIndex(sync:Boolean=false):Unit = { - suspend() - try { - if( sync ) { - log.current_appender.force - } - copyDirtyIndexToSnapshot - } finally { - resume() - } - } - - def purge() = { - suspend() - try{ - log.close - locked_purge - } finally { - might_fail { - log.open() - } - resume() - } - } - - def locked_purge { - for( x <- logDirectory.list_files) { - if (x.getName.endsWith(".log")) { - x.delete() - } - } - for( x <- directory.list_files) { - if (x.getName.endsWith(".index")) { - x.recursiveDelete - } - } - } - - def addCollection(record: CollectionRecord.Buffer) = { - assert_write_thread_executing - - val key = encodeLongKey(COLLECTION_PREFIX, record.getKey) - val value = record.toUnframedBuffer - might_fail_using_index { - log.appender { appender => - appender.append(LOG_ADD_COLLECTION, value) - index.put(key, value.toByteArray) - } - } - collectionMeta.put(record.getKey, new CollectionMeta) - } - - def getLogAppendPosition = log.appender_limit - - def listCollections: Seq[(Long, CollectionRecord.Buffer)] = { - val rc = ListBuffer[(Long, CollectionRecord.Buffer)]() - might_fail_using_index { - val ro = new ReadOptions - ro.verifyChecksums(verifyChecksums) - ro.fillCache(false) - index.cursorPrefixed(COLLECTION_PREFIX_ARRAY, ro) { (key, value) => - rc.append(( decodeLongKey(key)._2, CollectionRecord.FACTORY.parseUnframed(value) )) - true // to continue cursoring. - } - } - rc - } - - def removeCollection(collectionKey: Long) = { - assert_write_thread_executing - val key = encodeLongKey(COLLECTION_PREFIX, collectionKey) - val value = encodeVLong(collectionKey) - val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) - collectionMeta.remove(collectionKey) - might_fail_using_index { - log.appender { appender => - appender.append(LOG_REMOVE_COLLECTION, new Buffer(value)) - } - - val ro = new ReadOptions - ro.fillCache(false) - ro.verifyChecksums(verifyChecksums) - index.cursorPrefixed(entryKeyPrefix, ro) { (key, value)=> - val record = decodeEntryRecord(value) - val pos = if ( record.hasValueLocation ) { - Some(record.getValueLocation) - } else { - None - } - pos.foreach(logRefDecrement(_)) - index.delete(key) - true - } - index.delete(key) - } - } - - def collectionEmpty(collectionKey: Long) = { - assert_write_thread_executing - val key = encodeLongKey(COLLECTION_PREFIX, collectionKey) - val value = encodeVLong(collectionKey) - val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) - - val meta = collectionMeta.getOrElseUpdate(collectionKey, new CollectionMeta) - meta.size = 0 - meta.last_key = null - - might_fail_using_index { - index.get(key).foreach { collectionData => - log.appender { appender => - appender.append(LOG_REMOVE_COLLECTION, new Buffer(value)) - appender.append(LOG_ADD_COLLECTION, new Buffer(collectionData)) - } - - val ro = new ReadOptions - ro.fillCache(false) - ro.verifyChecksums(verifyChecksums) - index.cursorPrefixed(entryKeyPrefix, ro) { (key, value)=> - val record = decodeEntryRecord(value) - val pos = if ( record.hasValueLocation ) { - Some(record.getValueLocation) - } else { - None - } - pos.foreach(logRefDecrement(_)) - index.delete(key) - true - } - } - } - } - - def decodeQueueEntryMeta(value:EntryRecord.Getter):Int= { - if( value.hasMeta ) { - val is = new 
DataByteArrayInputStream(value.getMeta); - val metaVersion = is.readVarInt() - metaVersion match { - case 1 => - return is.readVarInt() - case _ => - } - } - return 0 - } - - def getDeliveryCounter(collectionKey: Long, seq:Long):Int = { - val ro = new ReadOptions - ro.fillCache(true) - ro.verifyChecksums(verifyChecksums) - val key = encodeEntryKey(ENTRY_PREFIX, collectionKey, encodeLong(seq)) - var rc = 0 - might_fail_using_index { - for( v <- index.get(key, ro) ) { - rc = decodeQueueEntryMeta(EntryRecord.FACTORY.parseUnframed(v)) - } - } - return rc - } - - def queueCursor(collectionKey: Long, seq:Long, endSeq:Long)(func: (Message)=>Boolean) = { - collectionCursor(collectionKey, encodeLong(seq), encodeLong(endSeq)) { (key, value) => - val seq = decodeLong(key) - var locator = DataLocator(store, value.getValueLocation, value.getValueLength) - val msg = getMessage(locator) - if( msg !=null ) { - msg.getMessageId().setEntryLocator(EntryLocator(collectionKey, seq)) - msg.getMessageId().setDataLocator(locator) - msg.setRedeliveryCounter(decodeQueueEntryMeta(value)) - func(msg) - } else { - warn("Could not load message seq: "+seq+" from "+locator) - true - } - } - } - - def transactionCursor(collectionKey: Long)(func: (AnyRef)=>Boolean) = { - collectionCursor(collectionKey, encodeLong(0), encodeLong(Long.MaxValue)) { (key, value) => - val seq = decodeLong(key) - if( value.getMeta != null ) { - - val is = new DataByteArrayInputStream(value.getMeta); - val log = is.readLong() - val offset = is.readInt() - val qid = is.readLong() - val seq = is.readLong() - val sub = is.readLong() - val ack = store.wireFormat.unmarshal(is).asInstanceOf[MessageAck] - ack.getLastMessageId.setDataLocator(DataLocator(store, log, offset)) - ack.getLastMessageId.setEntryLocator(EntryLocator(qid, seq)) - - func(XaAckRecord(collectionKey, seq, ack, sub)) - } else { - var locator = DataLocator(store, value.getValueLocation, value.getValueLength) - val msg = getMessage(locator) - if( msg !=null ) { - msg.getMessageId().setEntryLocator(EntryLocator(collectionKey, seq)) - msg.getMessageId().setDataLocator(locator) - func(msg) - } else { - warn("Could not load XA message seq: "+seq+" from "+locator) - true - } - } - } - } - - def getAckPosition(subKey: Long): Long = { - might_fail_using_index { - index.get(encodeEntryKey(ENTRY_PREFIX, subKey, ACK_POSITION)).map{ value=> - val record = decodeEntryRecord(value) - record.getValueLocation() - }.getOrElse(0L) - } - } - - def getMessage(locator:AnyRef):Message = { - assert(locator!=null) - val buffer = locator match { - case x:MessageRecord => - // Encoded form is still in memory.. - Some(x.data) - case DataLocator(store, pos, len) => - // Load the encoded form from disk. 
- log.read(pos, len).map(new Buffer(_)) - } - - // Lets decode - buffer.map(decodeMessage(_)).getOrElse(null) - } - - def decodeMessage(x: Buffer): Message = { - var data = if (store.snappyCompressLogs) { - Snappy.uncompress(x) - } else { - x - } - store.wireFormat.unmarshal(new ByteSequence(data.data, data.offset, data.length)).asInstanceOf[Message] - } - - def collectionCursor(collectionKey: Long, cursorPosition:Buffer, endCursorPosition:Buffer)(func: (Buffer, EntryRecord.Buffer)=>Boolean) = { - val ro = new ReadOptions - ro.fillCache(true) - ro.verifyChecksums(verifyChecksums) - val start = encodeEntryKey(ENTRY_PREFIX, collectionKey, cursorPosition) - val end = encodeEntryKey(ENTRY_PREFIX, collectionKey, endCursorPosition) - might_fail_using_index { - index.cursorRange(start, end, ro) { case (key, value) => - func(key.buffer.moveHead(9), EntryRecord.FACTORY.parseUnframed(value)) - } - } - } - - def collectionSize(collectionKey: Long) = { - collectionMeta.get(collectionKey).map(_.size).getOrElse(0L) - } - - def collectionIsEmpty(collectionKey: Long) = { - val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) - var empty = true - might_fail_using_index { - val ro = new ReadOptions - ro.fillCache(false) - ro.verifyChecksums(verifyChecksums) - index.cursorKeysPrefixed(entryKeyPrefix, ro) { key => - empty = false - false - } - } - empty - } - - val max_write_message_latency = TimeMetric() - val max_write_enqueue_latency = TimeMetric() - - val max_index_write_latency = TimeMetric() - - def store(uows: Array[DelayableUOW]) { - assert_write_thread_executing - might_fail_using_index { - log.appender { appender => - val syncNeeded = index.write(new WriteOptions, max_index_write_latency) { batch => - write_uows(uows, appender, batch) - } - if( syncNeeded && sync ) { - appender.force - } - } // end of log.appender { block } - - // now that data is logged.. 
locate message from the data in the logs - for( uow <- uows ) { - for((msg, action) <- uow.actions ){ - val messageRecord = action.messageRecord - if (messageRecord != null) { - messageRecord.id.setDataLocator(messageRecord.locator) - } - } - } - } - } - - - def write_uows(uows: Array[DelayableUOW], appender: RecordLog#LogAppender, batch: WriteBatch) = { - var syncNeeded = false - var write_message_total = 0L - var write_enqueue_total = 0L - - for( uow <- uows ) { - for( (msg, action) <- uow.actions ) { - val messageRecord = action.messageRecord - var log_info: LogInfo = null - var dataLocator: DataLocator = null - - if (messageRecord != null && messageRecord.locator == null) { - store.db.producerSequenceIdTracker.isDuplicate(messageRecord.id) - val start = System.nanoTime() - val p = appender.append(LOG_DATA, messageRecord.data) - log_info = p._2 - dataLocator = DataLocator(store, p._1, messageRecord.data.length) - messageRecord.locator = dataLocator -// println("msg: "+messageRecord.id+" -> "+dataLocator) - write_message_total += System.nanoTime() - start - } - - - for( entry <- action.dequeues) { - val keyLocation = entry.id.getEntryLocator.asInstanceOf[EntryLocator] - val key = encodeEntryKey(ENTRY_PREFIX, keyLocation.qid, keyLocation.seq) - - if (dataLocator == null) { - dataLocator = entry.id.getDataLocator match { - case x: DataLocator => x - case x: MessageRecord => x.locator - case _ => throw new RuntimeException("Unexpected locator type: " + dataLocator) - } - } - -// println("deq: "+entry.id+" -> "+dataLocator) - val log_record = new EntryRecord.Bean() - log_record.setCollectionKey(entry.queueKey) - log_record.setEntryKey(new Buffer(key, 9, 8)) - log_record.setValueLocation(dataLocator.pos) - appender.append(LOG_REMOVE_ENTRY, encodeEntryRecord(log_record.freeze())) - - batch.delete(key) - logRefDecrement(dataLocator.pos) - collectionDecrementSize(entry.queueKey) - } - - for( entry<- action.enqueues) { - - if (dataLocator == null) { - dataLocator = entry.id.getDataLocator match { - case x: DataLocator => x - case x: MessageRecord => x.locator - case _ => - throw new RuntimeException("Unexpected locator type") - } - } - -// println("enq: "+entry.id+" -> "+dataLocator) - val start = System.nanoTime() - - val key = encodeEntryKey(ENTRY_PREFIX, entry.queueKey, entry.queueSeq) - - assert(entry.id.getDataLocator() != null) - - val log_record = new EntryRecord.Bean() - log_record.setCollectionKey(entry.queueKey) - log_record.setEntryKey(new Buffer(key, 9, 8)) - log_record.setValueLocation(dataLocator.pos) - log_record.setValueLength(dataLocator.len) - - val kind = if (entry.deliveries==0) LOG_ADD_ENTRY else LOG_UPDATE_ENTRY - appender.append(kind, encodeEntryRecord(log_record.freeze())) - - val index_record = new EntryRecord.Bean() - index_record.setValueLocation(dataLocator.pos) - index_record.setValueLength(dataLocator.len) - - // Store the delivery counter. 
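// A minimal sketch of the queue-entry "meta" layout stored below and parsed back by
// decodeQueueEntryMeta above: a varint format-version tag (1) followed by the varint
// delivery count. EntryMetaSketch and its method names are illustrative only.
import org.fusesource.hawtbuf.{Buffer, DataByteArrayInputStream, DataByteArrayOutputStream}

object EntryMetaSketch {
  def encodeDeliveries(deliveries: Int): Buffer = {
    val os = new DataByteArrayOutputStream()
    os.writeVarInt(1)           // meta format version
    os.writeVarInt(deliveries)  // redelivery counter
    os.toBuffer
  }

  def decodeDeliveries(meta: Buffer): Int = {
    val is = new DataByteArrayInputStream(meta)
    is.readVarInt() match {
      case 1 => is.readVarInt() // known version: next varint is the counter
      case _ => 0               // unknown version: behave as if never redelivered
    }
  }

  def main(args: Array[String]): Unit = {
    assert(decodeDeliveries(encodeDeliveries(5)) == 5)
  }
}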
- if( entry.deliveries!=0 ) { - val os = new DataByteArrayOutputStream() - os.writeVarInt(1) // meta data format version - os.writeVarInt(entry.deliveries) - index_record.setMeta(os.toBuffer) - } - - val index_data = encodeEntryRecord(index_record.freeze()).toByteArray - batch.put(key, index_data) - - if( kind==LOG_ADD_ENTRY ) { - logRefIncrement(dataLocator.pos) - collectionIncrementSize(entry.queueKey, log_record.getEntryKey.toByteArray) - } - - write_enqueue_total += System.nanoTime() - start - } - - for( entry <- action.xaAcks ) { - - val ack = entry.ack - if (dataLocator == null) { - dataLocator = ack.getLastMessageId.getDataLocator match { - case x: DataLocator => x - case x: MessageRecord => x.locator - case _ => - throw new RuntimeException("Unexpected locator type") - } - } -// println(dataLocator) - - val el = ack.getLastMessageId.getEntryLocator.asInstanceOf[EntryLocator]; - val os = new DataByteArrayOutputStream() - os.writeLong(dataLocator.pos) - os.writeInt(dataLocator.len) - os.writeLong(el.qid) - os.writeLong(el.seq) - os.writeLong(entry.sub) - store.wireFormat.marshal(ack, os) - var ack_encoded = os.toBuffer - - val key = encodeEntryKey(ENTRY_PREFIX, entry.container, entry.seq) - val log_record = new EntryRecord.Bean() - log_record.setCollectionKey(entry.container) - log_record.setEntryKey(new Buffer(key, 9, 8)) - log_record.setMeta(ack_encoded) - appender.append(LOG_ADD_ENTRY, encodeEntryRecord(log_record.freeze())) - val index_record = new EntryRecord.Bean() - index_record.setMeta(ack_encoded) - batch.put(key, encodeEntryRecord(log_record.freeze()).toByteArray) - } - } - - for( entry <- uow.subAcks ) { - val key = encodeEntryKey(ENTRY_PREFIX, entry.subKey, ACK_POSITION) - val log_record = new EntryRecord.Bean() - log_record.setCollectionKey(entry.subKey) - log_record.setEntryKey(ACK_POSITION) - log_record.setValueLocation(entry.ackPosition) - appender.append(LOG_UPDATE_ENTRY, encodeEntryRecord(log_record.freeze())) - - val index_record = new EntryRecord.Bean() - index_record.setValueLocation(entry.ackPosition) - batch.put(key, encodeEntryRecord(index_record.freeze()).toByteArray) - } - - if (uow.syncNeeded) { - syncNeeded = true - } - } - - max_write_message_latency.add(write_message_total) - max_write_enqueue_latency.add(write_enqueue_total) - syncNeeded - } - - def getCollectionEntries(collectionKey: Long, firstSeq:Long, lastSeq:Long): Seq[(Buffer, EntryRecord.Buffer)] = { - var rc = ListBuffer[(Buffer, EntryRecord.Buffer)]() - val ro = new ReadOptions - ro.verifyChecksums(verifyChecksums) - ro.fillCache(true) - might_fail_using_index { - index.snapshot { snapshot => - ro.snapshot(snapshot) - val start = encodeEntryKey(ENTRY_PREFIX, collectionKey, firstSeq) - val end = encodeEntryKey(ENTRY_PREFIX, collectionKey, lastSeq+1) - index.cursorRange( start, end, ro ) { (key, value) => - val (_, _, seq) = decodeEntryKey(key) - rc.append((seq, EntryRecord.FACTORY.parseUnframed(value))) - true - } - } - } - rc - } - - def getLastQueueEntrySeq(collectionKey: Long): Long = { - getLastCollectionEntryKey(collectionKey).map(_.bigEndianEditor().readLong()).getOrElse(0L) - } - - def getLastCollectionEntryKey(collectionKey: Long): Option[Buffer] = { - collectionMeta.get(collectionKey).flatMap(x=> Option(x.last_key)).map(new Buffer(_)) - } - - // APLO-245: lets try to detect when leveldb needs a compaction.. - private def detect_if_compact_needed:Unit = { - - // auto compaction might be disabled... 
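// A standalone restatement of the compaction heuristic implemented just below: compact
// once the index holds more than ten level-0 sized files (10 * 4 MiB) and the .sst bytes
// per referenced entry exceed autoCompactionRatio (default 250). For example, 100 MiB of
// .sst data for 200,000 referenced entries is about 524 bytes/entry, which would trigger
// a compaction. CompactionHeuristicSketch is an illustrative name only.
object CompactionHeuristicSketch {
  val Level0FileSize = 1024 * 1024 * 4L

  def compactionNeeded(indexUsageBytes: Long, referencedEntries: Long, ratioLimit: Double): Boolean = {
    if (ratioLimit <= 0) return false                         // auto compaction disabled
    if (indexUsageBytes <= Level0FileSize * 10) return false  // level 0 not full yet
    if (referencedEntries <= 0) return true                   // index should be nearly empty
    (indexUsageBytes.toDouble / referencedEntries) > ratioLimit
  }

  def main(args: Array[String]): Unit = {
    println(compactionNeeded(100L * 1024 * 1024, 200000, 250)) // true  (~524 bytes/entry)
    println(compactionNeeded(20L * 1024 * 1024, 1000, 250))    // false (below the ~40 MiB floor)
  }
}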
- if ( store.autoCompactionRatio <= 0 ) { - return - } - - // How much space is the dirty index using?? - var index_usage = 0L - for( file <- dirtyIndexFile.recursiveList ) { - if(!file.isDirectory && file.getName.endsWith(".sst") ) { - index_usage += file.length() - } - } - - // Lets use the log_refs to get a rough estimate on how many entries are store in leveldb. - var index_queue_entries=0L - for ( (_, count) <- logRefs ) { - index_queue_entries += count.get() - } - - // Don't force compactions until level 0 is full. - val SSL_FILE_SIZE = 1024*1024*4L - if( index_usage > SSL_FILE_SIZE*10 ) { - if ( index_queue_entries > 0 ) { - val ratio = (index_usage*1.0f/index_queue_entries) - // println("usage: index_usage:%d, index_queue_entries:%d, ratio: %f".format(index_usage, index_queue_entries, ratio)) - - // lets compact if we go way over the healthy ratio. - if( ratio > store.autoCompactionRatio ) { - index.compact_needed = true - } - } else { - // at most the index should have 1 full level file. - index.compact_needed = true - } - } - - } - - def gc(topicPositions:Seq[(Long, Long)]):Unit = { - - // Delete message refs for topics who's consumers have advanced.. - if( !topicPositions.isEmpty ) { - might_fail_using_index { - index.write(new WriteOptions, max_index_write_latency) { batch => - for( (topic, first) <- topicPositions ) { - val ro = new ReadOptions - ro.fillCache(true) - ro.verifyChecksums(verifyChecksums) - val start = encodeEntryKey(ENTRY_PREFIX, topic, 0) - val end = encodeEntryKey(ENTRY_PREFIX, topic, first) - debug("Topic: %d GC to seq: %d", topic, first) - index.cursorRange(start, end, ro) { case (key, value) => - val entry = EntryRecord.FACTORY.parseUnframed(value) - batch.delete(key) - logRefDecrement(entry.getValueLocation) - true - } - } - } - } - } - - detect_if_compact_needed - - // Lets compact the leveldb index if it looks like we need to. - if( index.compact_needed ) { - val start = System.nanoTime() - index.compact - val duration = System.nanoTime() - start; - info("Compacted the leveldb index at: %s in %.2f ms", dirtyIndexFile, (duration / 1000000.0)) - } - - import collection.JavaConversions._ - - // drop the logs that are no longer referenced. 
- for( (x,y) <- logRefs.toSeq ) { - if( y.get() <= 0 ) { - if( y.get() < 0 ) { - warn("Found a negative log reference for log: "+x) - } - debug("Log no longer referenced: %x", x) - logRefs.remove(x) - } - } - - val emptyJournals = log.log_infos.keySet.toSet -- logRefs.keySet - - // We don't want to delete any journals that the index has not snapshot'ed or - // the the - - var limit = oldest_retained_snapshot - val deleteLimit = logRefKey(limit).getOrElse(limit).min(log.appender_start) - - emptyJournals.foreach { id => - if ( id < deleteLimit ) { - debug("Deleting log at %x", id) - log.delete(id) - } - } - } - - def oldest_retained_snapshot = lastIndexSnapshotPos - - def removePlist(collectionKey: Long) = { - val entryKeyPrefix = encodeLong(collectionKey) - collectionMeta.remove(collectionKey) - might_fail { - val ro = new ReadOptions - ro.fillCache(false) - ro.verifyChecksums(false) - plist.cursorPrefixed(entryKeyPrefix, ro) { (key, value)=> - plist.delete(key) - true - } - } - } - - def plistPut(key:Array[Byte], value:Array[Byte]) = plist.put(key, value, PLIST_WRITE_OPTIONS) - def plistDelete(key:Array[Byte]) = plist.delete(key, PLIST_WRITE_OPTIONS) - def plistGet(key:Array[Byte]) = plist.get(key) - def plistIterator = plist.db.iterator() - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala deleted file mode 100644 index 593ec9e23e..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala +++ /dev/null @@ -1,1152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.activemq.leveldb - -import org.apache.activemq.broker.{SuppressReplyException, LockableServiceSupport, BrokerServiceAware, ConnectionContext} -import org.apache.activemq.command._ -import org.apache.activemq.openwire.OpenWireFormat -import org.apache.activemq.usage.SystemUsage -import java.io.File -import java.io.IOException -import java.util.concurrent._ -import java.util.concurrent.atomic.AtomicLong -import beans.BeanProperty -import org.apache.activemq.store._ -import java.util._ -import collection.mutable.ListBuffer -import org.apache.activemq.broker.jmx.{BrokerMBeanSupport, AnnotatedMBean} -import org.apache.activemq.util._ -import org.apache.activemq.leveldb.util.Log -import org.apache.activemq.store.PList.PListIterator -import org.fusesource.hawtbuf.{UTF8Buffer, DataByteArrayOutputStream} -import org.fusesource.hawtdispatch; -import org.apache.activemq.broker.scheduler.JobSchedulerStore -import org.apache.activemq.store.IndexListener.MessageContext -import javax.management.ObjectName - -object LevelDBStore extends Log { - val DEFAULT_DIRECTORY = new File("LevelDB"); - - lazy val BLOCKING_EXECUTOR = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 10, TimeUnit.SECONDS, new SynchronousQueue[Runnable](), new ThreadFactory() { - def newThread(r:Runnable) = { - val rc = new Thread(null, r, "ActiveMQ Task"); - rc.setDaemon(true); - rc - } - }) - - val DONE = new InlineListenableFuture; - - def toIOException(e: Throwable): IOException = { - if (e.isInstanceOf[ExecutionException]) { - var cause: Throwable = (e.asInstanceOf[ExecutionException]).getCause - if (cause.isInstanceOf[IOException]) { - return cause.asInstanceOf[IOException] - } - } - if (e.isInstanceOf[IOException]) { - return e.asInstanceOf[IOException] - } - return IOExceptionSupport.create(e) - } - - def waitOn(future: java.util.concurrent.Future[AnyRef]): Unit = { - try { - future.get - } - catch { - case e: Throwable => { - throw toIOException(e) - } - } - } -} - -case class DurableSubscription(subKey:Long, topicKey:Long, info: SubscriptionInfo) { - var gcPosition = 0L - var lastAckPosition = 0L - var cursorPosition = 0L -} - -class LevelDBStoreTest(val store:LevelDBStore) extends LevelDBStoreTestMBean { - - import store._ - var suspendForce = false; - - override def setSuspendForce(value: Boolean): Unit = this.synchronized { - if( suspendForce!=value ) { - suspendForce = value; - if( suspendForce ) { - db.client.log.recordLogTestSupport.forceCall.suspend - } else { - db.client.log.recordLogTestSupport.forceCall.resume - } - } - } - - override def getSuspendForce: Boolean = this.synchronized { - suspendForce - } - - override def getForceCalls = this.synchronized { - db.client.log.recordLogTestSupport.forceCall.threads.get() - } - - var suspendWrite = false; - - override def setSuspendWrite(value: Boolean): Unit = this.synchronized { - if( suspendWrite!=value ) { - suspendWrite = value; - if( suspendWrite ) { - db.client.log.recordLogTestSupport.writeCall.suspend - } else { - db.client.log.recordLogTestSupport.writeCall.resume - } - } - } - - override def getSuspendWrite: Boolean = this.synchronized { - suspendWrite - } - - override def getWriteCalls = this.synchronized { - db.client.log.recordLogTestSupport.writeCall.threads.get() - } - - var suspendDelete = false; - - override def setSuspendDelete(value: Boolean): Unit = this.synchronized { - if( suspendDelete!=value ) { - suspendDelete = value; - if( suspendDelete ) { - db.client.log.recordLogTestSupport.deleteCall.suspend - } else { - 
db.client.log.recordLogTestSupport.deleteCall.resume - } - } - } - - override def getSuspendDelete: Boolean = this.synchronized { - suspendDelete - } - - override def getDeleteCalls = this.synchronized { - db.client.log.recordLogTestSupport.deleteCall.threads.get() - } - -} - -class LevelDBStoreView(val store:LevelDBStore) extends LevelDBStoreViewMBean { - import store._ - - def getAsyncBufferSize = asyncBufferSize - def getIndexDirectory = directory.getCanonicalPath - def getLogDirectory = Option(logDirectory).getOrElse(directory).getCanonicalPath - def getIndexBlockRestartInterval = indexBlockRestartInterval - def getIndexBlockSize = indexBlockSize - def getIndexCacheSize = indexCacheSize - def getIndexCompression = indexCompression - def getIndexFactory = db.client.factory.getClass.getName - def getIndexMaxOpenFiles = indexMaxOpenFiles - def getIndexWriteBufferSize = indexWriteBufferSize - def getLogSize = logSize - def getParanoidChecks = paranoidChecks - def getSync = sync - def getVerifyChecksums = verifyChecksums - - def getUowClosedCounter = db.uowClosedCounter - def getUowCanceledCounter = db.uowCanceledCounter - def getUowStoringCounter = db.uowStoringCounter - def getUowStoredCounter = db.uowStoredCounter - - def getUowMaxCompleteLatency = db.uow_complete_latency.get - def getMaxIndexWriteLatency = db.client.max_index_write_latency.get - def getMaxLogWriteLatency = db.client.log.max_log_write_latency.get - def getMaxLogFlushLatency = db.client.log.max_log_flush_latency.get - def getMaxLogRotateLatency = db.client.log.max_log_rotate_latency.get - - def resetUowMaxCompleteLatency = db.uow_complete_latency.reset - def resetMaxIndexWriteLatency = db.client.max_index_write_latency.reset - def resetMaxLogWriteLatency = db.client.log.max_log_write_latency.reset - def resetMaxLogFlushLatency = db.client.log.max_log_flush_latency.reset - def resetMaxLogRotateLatency = db.client.log.max_log_rotate_latency.reset - - def getIndexStats = db.client.index.getProperty("leveldb.stats") - - def compact() { - import hawtdispatch._ - var done = new CountDownLatch(1) - val positions = getTopicGCPositions - client.writeExecutor { - client.index.compact_needed = true - client.gc(positions) - done.countDown() - } - done.await() - } -} - -import LevelDBStore._ - -class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore with PListStore with TransactionIdTransformerAware { - - final val wireFormat = new OpenWireFormat - final val db = new DBManager(this) - final var client = createClient - - @BeanProperty - var directory = DEFAULT_DIRECTORY - @BeanProperty - var logDirectory: File = null - - @BeanProperty - var logSize: Long = 1024 * 1024 * 100 - @BeanProperty - var indexFactory: String = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory" - @BeanProperty - var sync: Boolean = true - @BeanProperty - var verifyChecksums: Boolean = false - @BeanProperty - var indexMaxOpenFiles: Int = 1000 - @BeanProperty - var indexBlockRestartInterval: Int = 16 - @BeanProperty - var paranoidChecks: Boolean = false - @BeanProperty - var indexWriteBufferSize: Int = 1024*1024*6 - @BeanProperty - var indexBlockSize: Int = 4 * 1024 - @BeanProperty - var indexCompression: String = "snappy" - @BeanProperty - var logCompression: String = "none" - @BeanProperty - var indexCacheSize: Long = 1024 * 1024 * 256L - @BeanProperty - var flushDelay = 0 - @BeanProperty - var asyncBufferSize = 1024*1024*4 - @BeanProperty - var monitorStats = false - 
@BeanProperty - var autoCompactionRatio = 250 - - var purgeOnStatup: Boolean = false - - val queues = collection.mutable.HashMap[ActiveMQQueue, LevelDBStore#LevelDBMessageStore]() - val topics = collection.mutable.HashMap[ActiveMQTopic, LevelDBStore#LevelDBTopicMessageStore]() - val topicsById = collection.mutable.HashMap[Long, LevelDBStore#LevelDBTopicMessageStore]() - val plists = collection.mutable.HashMap[String, LevelDBStore#LevelDBPList]() - - private val lock = new Object(); - - def check_running = { - if( this.isStopped ) { - throw new SuppressReplyException("Store has been stopped") - } - } - - def init() = {} - - def createDefaultLocker() = { - var locker = new SharedFileLocker(); - locker.configure(this); - locker - } - - override def toString: String = { - return "LevelDB[" + directory.getAbsolutePath + "]" - } - - def objectName = { - var brokerON = brokerService.getBrokerObjectName - BrokerMBeanSupport.createPersistenceAdapterName(brokerON.toString, this.toString) - } - - var snappyCompressLogs = false - - def doStart: Unit = { - if( brokerService!=null ) { - wireFormat.setVersion(brokerService.getStoreOpenWireVersion) - } - snappyCompressLogs = logCompression.toLowerCase == "snappy" && Snappy != null - debug("starting") - - // Expose a JMX bean to expose the status of the store. - if(brokerService!=null && brokerService.isUseJmx){ - try { - AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreView(this), objectName) - if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) { - val name = new ObjectName(objectName.toString + ",view=Test") - AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreTest(this), name) - } - } catch { - case e: Throwable => { - warn(e, "LevelDB Store could not be registered in JMX: " + e.getMessage) - } - } - } - - if (purgeOnStatup) { - purgeOnStatup = false - db.client.locked_purge - info("Purged: "+this) - } - - db.start - db.loadCollections - - // Finish recovering the prepared XA transactions. - import collection.JavaConversions._ - for( (txid, transaction) <- transactions ) { - assert( transaction.xacontainer_id != -1 ) - val (msgs, acks) = db.getXAActions(transaction.xacontainer_id) - transaction.xarecovery = (msgs, acks.map(_.ack)) - for ( msg <- msgs ) { - transaction.add(createMessageStore(msg.getDestination), null, msg, false); - } - for ( record <- acks ) { - var ack = record.ack - var store = createMessageStore(ack.getDestination) - if( record.sub == -1 ) { - store.preparedAcks.add(ack.getLastMessageId) - transaction.remove(store, ack); - } else { - val topic = store.asInstanceOf[LevelDBTopicMessageStore]; - for ( sub <- topic.subscription_with_key(record.sub) ) { - val position = db.queuePosition(ack.getLastMessageId) - transaction.updateAckPosition( topic, sub, position, ack); - sub.lastAckPosition = position - } - } - } - } - - // Remove topics that don't have subs.. 
- for( (name, topic) <- topics.toArray ) { - if( topic.subscription_count == 0 ) { - removeTopicMessageStore(name) - } - } - - debug("started") - } - - def doStop(stopper: ServiceStopper): Unit = { - db.stop - if(brokerService!=null && brokerService.isUseJmx){ - brokerService.getManagementContext().unregisterMBean(objectName); - if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) - brokerService.getManagementContext().unregisterMBean(new ObjectName(objectName.toString+",view=Test")); - } - info("Stopped "+this) - } - - def broker_service = brokerService - - def blocking_executor:Executor = { - if( broker_service != null ) { - broker_service.getTaskRunnerFactory - } else { - BLOCKING_EXECUTOR - } - } - - var transactionIdTransformer: TransactionIdTransformer = new TransactionIdTransformer{ - def transform(txid: TransactionId): TransactionId = txid - } - - def setTransactionIdTransformer(transactionIdTransformer: TransactionIdTransformer) { - this.transactionIdTransformer = transactionIdTransformer - } - - def setBrokerName(brokerName: String): Unit = { - } - - def setUsageManager(usageManager: SystemUsage): Unit = { - } - - def deleteAllMessages: Unit = { - purgeOnStatup = true - } - - def getLastMessageBrokerSequenceId: Long = { - return 0 - } - - def createTransactionStore = new LevelDBTransactionStore(this) - - val transactions = new ConcurrentHashMap[TransactionId, Transaction]() - - trait TransactionAction { - def commit(uow:DelayableUOW):Unit - def prepare(uow:DelayableUOW):Unit - def rollback(uow:DelayableUOW):Unit - } - - case class Transaction(id:TransactionId) { - val commitActions = ListBuffer[TransactionAction]() - - val xaseqcounter: AtomicLong = new AtomicLong(0) - var xarecovery:(ListBuffer[Message], ListBuffer[MessageAck]) = null - var xacontainer_id = -1L - - def prepared = xarecovery!=null - def prepare = { - if( !prepared ) { - val done = new CountDownLatch(1) - withUow { uow => - xarecovery = (ListBuffer[Message](), ListBuffer[MessageAck]()) - xacontainer_id = db.createTransactionContainer(id.asInstanceOf[XATransactionId]) - for ( action <- commitActions ) { - action.prepare(uow) - } - uow.syncFlag = true - uow.addCompleteListener(done.countDown()) - } - done.await() - } - } - - def add(store:LevelDBStore#LevelDBMessageStore, context: ConnectionContext, message: Message, delay:Boolean) = { - commitActions += new TransactionAction() { - def commit(uow:DelayableUOW) = { - if( prepared ) { - uow.dequeue(xacontainer_id, message.getMessageId) - } - var copy = message.getMessageId.copy() - copy.setEntryLocator(null) - message.setMessageId(copy) - store.doAdd(uow, context, message, delay) - } - - def prepare(uow:DelayableUOW) = { - // add it to the xa container instead of the actual store container. - uow.enqueue(xacontainer_id, xaseqcounter.incrementAndGet, message, delay) - xarecovery._1 += message - } - - def rollback(uow:DelayableUOW) = { - if( prepared ) { - uow.dequeue(xacontainer_id, message.getMessageId) - } - } - - } - } - - def remove(store:LevelDBStore#LevelDBMessageStore, ack:MessageAck) = { - commitActions += new TransactionAction() { - - def commit(uow:DelayableUOW) = { - store.doRemove(uow, ack.getLastMessageId) - if( prepared ) { - store.preparedAcks.remove(ack.getLastMessageId) - } - } - - def prepare(uow:DelayableUOW) = { - // add it to the xa container instead of the actual store container. 
- uow.xaAck(XaAckRecord(xacontainer_id, xaseqcounter.incrementAndGet, ack)) - xarecovery._2 += ack - store.preparedAcks.add(ack.getLastMessageId) - } - - def rollback(uow: DelayableUOW) { - if( prepared ) { - store.preparedAcks.remove(ack.getLastMessageId) - } - } - } - } - - def updateAckPosition(store:LevelDBStore#LevelDBTopicMessageStore, sub: DurableSubscription, position: Long, ack:MessageAck) = { - commitActions += new TransactionAction() { - var prev_position = sub.lastAckPosition - - def commit(uow:DelayableUOW) = { - store.doUpdateAckPosition(uow, sub, position) - sub.gcPosition = position - } - def prepare(uow:DelayableUOW) = { - prev_position = sub.lastAckPosition - sub.lastAckPosition = position - uow.xaAck(XaAckRecord(xacontainer_id, xaseqcounter.incrementAndGet, ack, sub.subKey)) - } - def rollback(uow: DelayableUOW) { - if ( prepared ) { - sub.lastAckPosition = prev_position - } - } - } - } - } - - def transaction(original: TransactionId) = { - val txid = transactionIdTransformer.transform(original) - var rc = transactions.get(txid) - if( rc == null ) { - rc = Transaction(txid) - val prev = transactions.putIfAbsent(txid, rc) - if (prev!=null) { - rc = prev - } - } - rc - } - - def verify_running = { - if( isStopping || isStopped ) { - try { - throw new IOException("Not running") - } catch { - case e:IOException => - if( broker_service!=null ) { - broker_service.handleIOException(e) - } - throw new SuppressReplyException(e); - } - } - } - - def commit(original: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = { - - verify_running - - val txid = transactionIdTransformer.transform(original) - transactions.remove(txid) match { - case null => - // Only in-flight non-persistent messages in this TX. - if( preCommit!=null ) - preCommit.run() - if( postCommit!=null ) - postCommit.run() - case tx => - val done = new CountDownLatch(1) - // Ugly synchronization hack to make sure messages are ordered the way the cursor expects them. 
- transactions.synchronized { - withUow { uow => - for( action <- tx.commitActions ) { - action.commit(uow) - } - uow.syncFlag = true - uow.addCompleteListener { - if( preCommit!=null ) - preCommit.run() - done.countDown() - } - } - } - done.await() - if( tx.prepared ) { - db.removeTransactionContainer(tx.xacontainer_id) - } - if( postCommit!=null ) - postCommit.run() - } - } - - def rollback(original: TransactionId) = { - verify_running - - val txid = transactionIdTransformer.transform(original) - transactions.remove(txid) match { - case null => - debug("on rollback, the transaction " + txid + " does not exist") - case tx => - val done = new CountDownLatch(1) - withUow { uow => - for( action <- tx.commitActions.reverse ) { - action.rollback(uow) - } - uow.syncFlag = true - uow.addCompleteListener { done.countDown() } - } - done.await() - if( tx.prepared ) { - db.removeTransactionContainer(tx.xacontainer_id) - } - } - } - - def prepare(original: TransactionId) = { - verify_running - - val tx = transactionIdTransformer.transform(original) - transactions.get(tx) match { - case null => - warn("on prepare, the transaction " + tx + " does not exist") - case tx => - tx.prepare - } - } - - var doingRecover = false - def recover(listener: TransactionRecoveryListener) = { - - verify_running - - this.doingRecover = true - try { - import collection.JavaConversions._ - for ( (txid, transaction) <- transactions ) { - if( transaction.prepared ) { - val (msgs, acks) = transaction.xarecovery - listener.recover(txid.asInstanceOf[XATransactionId], msgs.toArray, acks.toArray); - } - } - } finally { - this.doingRecover = false - } - } - - - def getPList(name: String): PList = { - lock.synchronized(plists.get(name)).getOrElse(db.createPList(name)) - } - - def createPList(name: String, key: Long):LevelDBStore#LevelDBPList = { - var rc = new LevelDBPList(name, key) - lock.synchronized { - plists.put(name, rc) - } - rc - } - - def removePList(name: String): Boolean = { - plists.remove(name) match { - case Some(list)=> - db.destroyPList(list.key) - list.listSize.set(0) - true - case None => - false - } - } - - - def createMessageStore(destination: ActiveMQDestination):LevelDBStore#LevelDBMessageStore = { - destination match { - case destination:ActiveMQQueue => - createQueueMessageStore(destination) - case destination:ActiveMQTopic => - createTopicMessageStore(destination) - } - } - - def createQueueMessageStore(destination: ActiveMQQueue):LevelDBStore#LevelDBMessageStore = { - lock.synchronized(queues.get(destination)).getOrElse(db.createQueueStore(destination)) - } - - def createQueueMessageStore(destination: ActiveMQQueue, key: Long):LevelDBStore#LevelDBMessageStore = { - var rc = new LevelDBMessageStore(destination, key) - lock.synchronized { - queues.put(destination, rc) - } - rc - } - - def removeQueueMessageStore(destination: ActiveMQQueue): Unit = lock synchronized { - queues.remove(destination).foreach { store=> - db.destroyQueueStore(store.key) - } - } - - def createTopicMessageStore(destination: ActiveMQTopic):LevelDBStore#LevelDBTopicMessageStore = { - lock.synchronized(topics.get(destination)).getOrElse(db.createTopicStore(destination)) - } - - def createTopicMessageStore(destination: ActiveMQTopic, key: Long):LevelDBStore#LevelDBTopicMessageStore = { - var rc = new LevelDBTopicMessageStore(destination, key) - lock synchronized { - topics.put(destination, rc) - topicsById.put(key, rc) - } - rc - } - - def createJobSchedulerStore():JobSchedulerStore = { - throw new UnsupportedOperationException(); - } 
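// A stripped-down sketch of the blocking-commit pattern used by commit() above: attach a
// completion listener to the asynchronous unit of work, wait on a CountDownLatch, run
// preCommit from the callback and postCommit after the latch opens. SyncedCommitSketch
// and runSynced are illustrative names only, not store API.
import java.util.concurrent.{CountDownLatch, Executors}

object SyncedCommitSketch {
  private val writePipeline = Executors.newSingleThreadExecutor()

  def runSynced(work: () => Unit, preCommit: Runnable, postCommit: Runnable): Unit = {
    val done = new CountDownLatch(1)
    writePipeline.submit(new Runnable {
      def run(): Unit = {
        try work()
        finally {
          // mirrors uow.addCompleteListener { preCommit.run(); done.countDown() }
          if (preCommit != null) preCommit.run()
          done.countDown()
        }
      }
    })
    done.await()                        // block the caller until the async write completed
    if (postCommit != null) postCommit.run()
  }

  def main(args: Array[String]): Unit = {
    runSynced(() => println("write batch applied"),
      new Runnable { def run(): Unit = println("preCommit") },
      new Runnable { def run(): Unit = println("postCommit") })
    writePipeline.shutdown()
  }
}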
- - def removeTopicMessageStore(destination: ActiveMQTopic): Unit = { - topics.remove(destination).foreach { store=> - store.subscriptions.values.foreach { sub => - db.removeSubscription(sub) - } - store.subscriptions.clear() - db.destroyQueueStore(store.key) - } - } - - def getLogAppendPosition = db.getLogAppendPosition - - def getDestinations: Set[ActiveMQDestination] = { - import collection.JavaConversions._ - var rc: HashSet[ActiveMQDestination] = new HashSet[ActiveMQDestination] - rc.addAll(topics.keys) - rc.addAll(queues.keys) - return rc - } - - def getLastProducerSequenceId(id: ProducerId) = db.getLastProducerSequenceId(id) - - def setMaxFailoverProducersToTrack(maxFailoverProducersToTrack:Int ) = { - db.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack); - } - - def getMaxFailoverProducersToTrack() = { - db.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack() - } - - def setFailoverProducersAuditDepth(failoverProducersAuditDepth:Int) = { - db.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth); - } - - def getFailoverProducersAuditDepth() = { - db.producerSequenceIdTracker.getAuditDepth(); - } - - def size: Long = { - return db.client.size - } - - def checkpoint(sync: Boolean): Unit = db.checkpoint(sync) - - def withUow[T](func:(DelayableUOW)=>T):T = { - val uow = db.createUow - try { - func(uow) - } finally { - uow.release() - } - } - - private def subscriptionKey(clientId: String, subscriptionName: String): String = { - return clientId + ":" + subscriptionName - } - - case class LevelDBMessageStore(dest: ActiveMQDestination, val key: Long) extends AbstractMessageStore(dest) { - - val lastSeq: AtomicLong = new AtomicLong(0) - protected var cursorPosition: Long = 0 - val preparedAcks = new HashSet[MessageId]() - val pendingCursorAdds = new LinkedList[Long]() - lastSeq.set(db.getLastQueueEntrySeq(key)) - - def cursorResetPosition = 0L - - def doAdd(uow: DelayableUOW, context: ConnectionContext, message: Message, delay:Boolean): CountDownFuture[AnyRef] = { - check_running - message.beforeMarshall(wireFormat); - message.incrementReferenceCount() - uow.addCompleteListener({ - message.decrementReferenceCount() - }) - lastSeq.synchronized { - val seq = lastSeq.incrementAndGet() - message.getMessageId.setFutureOrSequenceLong(seq); - // null context on xa recovery, we want to bypass the cursor & pending adds as it will be reset - if (indexListener != null && context != null) { - pendingCursorAdds.synchronized { pendingCursorAdds.add(seq) } - indexListener.onAdd(new MessageContext(context, message, new Runnable { - def run(): Unit = pendingCursorAdds.synchronized { pendingCursorAdds.remove(seq) } - })) - } - uow.enqueue(key, seq, message, delay) - } - } - - override def asyncAddQueueMessage(context: ConnectionContext, message: Message) = asyncAddQueueMessage(context, message, false) - override def asyncAddQueueMessage(context: ConnectionContext, message: Message, delay: Boolean): ListenableFuture[AnyRef] = { - check_running - message.getMessageId.setEntryLocator(null) - if( message.getTransactionId!=null ) { - transaction(message.getTransactionId).add(this, context, message, delay) - DONE - } else { - withUow { uow=> - doAdd(uow, context, message, delay) - } - } - } - - override def addMessage(context: ConnectionContext, message: Message) = addMessage(context, message, false) - override def addMessage(context: ConnectionContext, message: Message, delay: Boolean): Unit = { - check_running - 
waitOn(asyncAddQueueMessage(context, message, delay)) - } - - override def updateMessage(message: Message): Unit = { - check_running - // the only current usage of update is to increment the redelivery counter - withUow {uow => uow.incrementRedelivery(key, message.getMessageId)} - } - - def doRemove(uow: DelayableUOW, id: MessageId): CountDownFuture[AnyRef] = { - uow.dequeue(key, id) - } - - override def removeAsyncMessage(context: ConnectionContext, ack: MessageAck): Unit = { - check_running - if( ack.getTransactionId!=null ) { - transaction(ack.getTransactionId).remove(this, ack) - } else { - waitOn(withUow{uow=> - doRemove(uow, ack.getLastMessageId) - }) - } - } - - def removeMessage(context: ConnectionContext, ack: MessageAck): Unit = { - check_running - removeAsyncMessage(context, ack) - } - - def getMessage(id: MessageId): Message = { - check_running - var message: Message = db.getMessage(id) - if (message == null) { - throw new IOException("Message id not found: " + id) - } - return message - } - - def removeAllMessages(context: ConnectionContext): Unit = { - check_running - db.collectionEmpty(key) - cursorPosition = cursorResetPosition - } - - override def getMessageCount: Int = { - return db.collectionSize(key).toInt - } - - override def isEmpty: Boolean = { - return db.collectionIsEmpty(key) - } - - def getCursorPendingLimit: Long = { - pendingCursorAdds.synchronized { Option(pendingCursorAdds.peek).getOrElse(Long.MaxValue) } - } - - def recover(listener: MessageRecoveryListener): Unit = { - check_running - cursorPosition = db.cursorMessages(preparedAcks, key, listener, cursorResetPosition, getCursorPendingLimit) - } - - def resetBatching: Unit = { - cursorPosition = cursorResetPosition - } - - def recoverNextMessages(maxReturned: Int, listener: MessageRecoveryListener): Unit = { - check_running - cursorPosition = db.cursorMessages(preparedAcks, key, listener, cursorPosition, getCursorPendingLimit, maxReturned) - } - - override def setBatch(id: MessageId): Unit = { - cursorPosition = Math.min(getCursorPendingLimit, db.queuePosition(id)) + 1 - } - - } - - // - // This gts called when the store is first loading up, it restores - // the existing durable subs.. - def createSubscription(sub:DurableSubscription) = { - lock.synchronized(topicsById.get(sub.topicKey)) match { - case Some(topic) => - topic.synchronized { - topic.subscriptions.put((sub.info.getClientId, sub.info.getSubcriptionName), sub) - } - case None => - // Topic does not exist.. so kill the durable sub.. 
- db.removeSubscription(sub) - } - } - - def getTopicGCPositions = { - import collection.JavaConversions._ - val topics = lock.synchronized { - new ArrayList(topicsById.values()) - } - topics.flatMap(_.gcPosition).toSeq - } - - class LevelDBTopicMessageStore(dest: ActiveMQDestination, key: Long) extends LevelDBMessageStore(dest, key) with TopicMessageStore { - val subscriptions = collection.mutable.HashMap[(String, String), DurableSubscription]() - var firstSeq = 0L - - override def cursorResetPosition = firstSeq - - def subscription_with_key(key:Long) = subscriptions.find(_._2.subKey == key).map(_._2) - - override def asyncAddQueueMessage(context: ConnectionContext, message: Message, delay: Boolean): ListenableFuture[AnyRef] = { - super.asyncAddQueueMessage(context, message, false) - } - - var stats = new MessageStoreSubscriptionStatistics(false) - - def getMessageStoreSubStatistics: MessageStoreSubscriptionStatistics = { - stats; - } - - def subscription_count = subscriptions.synchronized { - subscriptions.size - } - - def gcPosition:Option[(Long, Long)] = { - var pos = lastSeq.get() - subscriptions.synchronized { - subscriptions.values.foreach { sub => - if( sub.gcPosition < pos ) { - pos = sub.gcPosition - } - } - if( firstSeq != pos+1) { - firstSeq = pos+1 - Some(key, firstSeq) - } else { - None - } - } - } - - def addSubscription(info: SubscriptionInfo, retroactive: Boolean) = { - check_running - var sub = db.addSubscription(key, info) - subscriptions.synchronized { - subscriptions.put((info.getClientId, info.getSubcriptionName), sub) - } - sub.lastAckPosition = if (retroactive) 0 else lastSeq.get() - sub.gcPosition = sub.lastAckPosition - waitOn(withUow{ uow=> - uow.updateAckPosition(sub.subKey, sub.lastAckPosition) - uow.countDownFuture - }) - } - - def getAllSubscriptions: Array[SubscriptionInfo] = subscriptions.synchronized { - check_running - subscriptions.values.map(_.info).toArray - } - - def lookupSubscription(clientId: String, subscriptionName: String): SubscriptionInfo = subscriptions.synchronized { - check_running - subscriptions.get((clientId, subscriptionName)).map(_.info).getOrElse(null) - } - - def deleteSubscription(clientId: String, subscriptionName: String): Unit = { - check_running - subscriptions.synchronized { - subscriptions.remove((clientId, subscriptionName)) - }.foreach(db.removeSubscription(_)) - } - - private def lookup(clientId: String, subscriptionName: String): Option[DurableSubscription] = subscriptions.synchronized { - subscriptions.get((clientId, subscriptionName)) - } - - def doUpdateAckPosition(uow: DelayableUOW, sub: DurableSubscription, position: Long) = { - sub.lastAckPosition = position - sub.gcPosition = position - uow.updateAckPosition(sub.subKey, sub.lastAckPosition) - } - - def acknowledge(context: ConnectionContext, clientId: String, subscriptionName: String, messageId: MessageId, ack: MessageAck): Unit = { - check_running - lookup(clientId, subscriptionName).foreach { sub => - var position = db.queuePosition(messageId) - if( ack.getTransactionId!=null ) { - transaction(ack.getTransactionId).updateAckPosition(this, sub, position, ack) - DONE - } else { - waitOn(withUow{ uow=> - doUpdateAckPosition(uow, sub, position) - uow.countDownFuture - }) - } - - } - } - - def resetBatching(clientId: String, subscriptionName: String): Unit = { - check_running - lookup(clientId, subscriptionName).foreach { sub => - sub.cursorPosition = 0 - } - } - def recoverSubscription(clientId: String, subscriptionName: String, listener: MessageRecoveryListener): 
Unit = { - check_running - lookup(clientId, subscriptionName).foreach { sub => - sub.cursorPosition = db.cursorMessages(preparedAcks, key, listener, sub.cursorPosition.max(sub.lastAckPosition+1)) - } - } - - def recoverNextMessages(clientId: String, subscriptionName: String, maxReturned: Int, listener: MessageRecoveryListener): Unit = { - check_running - lookup(clientId, subscriptionName).foreach { sub => - sub.cursorPosition = db.cursorMessages(preparedAcks, key, listener, sub.cursorPosition.max(sub.lastAckPosition+1), Long.MaxValue, maxReturned) - } - } - - def getMessageCount(clientId: String, subscriptionName: String): Int = { - check_running - lookup(clientId, subscriptionName) match { - case Some(sub) => - (lastSeq.get - sub.lastAckPosition).toInt - case None => 0 - } - } - - def getMessageSize(clientId: String, subscriptionName: String): Long = { - check_running - return 0 - } - - } - class LevelDBPList(val name: String, val key: Long) extends PList { - import LevelDBClient._ - - val lastSeq = new AtomicLong(Long.MaxValue/2) - val firstSeq = new AtomicLong(lastSeq.get+1) - val listSize = new AtomicLong(0) - - def getName: String = name - def destroy() = { - check_running - removePList(name) - } - - def addFirst(id: String, bs: ByteSequence): AnyRef = { - check_running - var pos = lastSeq.decrementAndGet() - add(pos, id, bs) - listSize.incrementAndGet() - new java.lang.Long(pos) - } - - def addLast(id: String, bs: ByteSequence): AnyRef = { - check_running - var pos = lastSeq.incrementAndGet() - add(pos, id, bs) - listSize.incrementAndGet() - new java.lang.Long(pos) - } - - def add(pos:Long, id: String, bs: ByteSequence) = { - check_running - val encoded_key = encodeLongLong(key, pos) - val encoded_id = new UTF8Buffer(id) - val os = new DataByteArrayOutputStream(2+encoded_id.length+bs.length) - os.writeShort(encoded_id.length) - os.write(encoded_id.data, encoded_id.offset, encoded_id.length) - os.write(bs.getData, bs.getOffset, bs.getLength) - db.plistPut(encoded_key, os.toBuffer.toByteArray) - } - - def remove(position: AnyRef): Boolean = { - check_running - val pos = position.asInstanceOf[java.lang.Long].longValue() - val encoded_key = encodeLongLong(key, pos) - db.plistGet(encoded_key) match { - case Some(value) => - db.plistDelete(encoded_key) - listSize.decrementAndGet() - true - case None => - false - } - } - - def isEmpty = size()==0 - def size(): Long = listSize.get() - def messageSize(): Long = 0 - - def iterator() = new PListIterator() { - check_running - val prefix = LevelDBClient.encodeLong(key) - var dbi = db.plistIterator - var last_key:Array[Byte] = _ - - dbi.seek(prefix); - - - def hasNext: Boolean = dbi!=null && dbi.hasNext && dbi.peekNext.getKey.startsWith(prefix) - def next() = { - if ( dbi==null || !dbi.hasNext ) { - throw new NoSuchElementException(); - } - val n = dbi.peekNext(); - last_key = n.getKey - val (k, pos) = decodeLongLong(last_key) - if( k!=key ) { - throw new NoSuchElementException(); - } - var value = n.getValue - val is = new org.fusesource.hawtbuf.DataByteArrayInputStream(value) - val id = is.readBuffer(is.readShort()).utf8().toString - val data = new ByteSequence(value, is.getPos, value.length-is.getPos) - dbi.next() - new PListEntry(id, data, pos) - } - - def release() = { - dbi.close() - dbi = null - } - - def remove() = { - if( last_key==null ) { - throw new NoSuchElementException(); - } - db.plistDelete(last_key) - listSize.decrementAndGet() - last_key = null - } - } - - } - - class LevelDBTransactionStore(val store:LevelDBStore) extends 
TransactionStore { - def start() = {} - - def stop() = {} - - def prepare(txid: TransactionId) = store.prepare(txid) - - def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = store.commit(txid, wasPrepared, preCommit, postCommit) - - def rollback(txid: TransactionId) = store.rollback(txid) - - def recover(listener: TransactionRecoveryListener) = store.recover(listener) - } - - /////////////////////////////////////////////////////////////////////////// - // The following methods actually have nothing to do with JMS txs... It's more like - // operation batch.. we handle that in the DBManager tho.. - /////////////////////////////////////////////////////////////////////////// - def beginTransaction(context: ConnectionContext): Unit = {} - def commitTransaction(context: ConnectionContext): Unit = {} - def rollbackTransaction(context: ConnectionContext): Unit = {} - - def createClient = new LevelDBClient(this); - - def allowIOResumption() = {} -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala deleted file mode 100644 index c9b7916a93..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala +++ /dev/null @@ -1,663 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb - -import java.{lang=>jl} -import java.{util=>ju} - -import java.util.zip.CRC32 -import java.util.Map.Entry -import java.util.concurrent.atomic.{AtomicInteger, AtomicLong} -import java.io._ -import org.fusesource.hawtbuf.{DataByteArrayInputStream, DataByteArrayOutputStream, Buffer} -import org.fusesource.hawtdispatch.BaseRetained -import org.apache.activemq.leveldb.util.FileSupport._ -import org.apache.activemq.util.LRUCache -import util.TimeMetric._ -import util.{TimeMetric, Log} -import java.util.TreeMap -import java.util.concurrent.locks.{ReentrantReadWriteLock, ReadWriteLock} -import java.util.concurrent.CountDownLatch - -object RecordLog extends Log { - - // The log files contain a sequence of variable length log records: - // record := header + data - // - // header := - // '*' : int8 // Start of Record Magic - // kind : int8 // Help identify content type of the data. 
- // checksum : uint32 // crc32c of the data[] - // length : uint32 // the length the the data - - val LOG_HEADER_PREFIX = '*'.toByte - val UOW_END_RECORD = -1.toByte - - val LOG_HEADER_SIZE = 10 - - val BUFFER_SIZE = 1024*512 - val BYPASS_BUFFER_SIZE = 1024*16 - - case class LogInfo(file:File, position:Long, length:Long) { - def limit = position+length - } - - def encode_long(a1:Long) = { - val out = new DataByteArrayOutputStream(8) - out.writeLong(a1) - out.toBuffer - } - - def decode_long(value:Buffer):Long = { - val in = new DataByteArrayInputStream(value) - in.readLong() - } - -} - -class SuspendCallSupport { - - val lock = new ReentrantReadWriteLock() - var resumeLatch:CountDownLatch = _ - var resumedLatch:CountDownLatch = _ - @volatile - var threads = new AtomicInteger() - - def suspend = this.synchronized { - val suspended = new CountDownLatch(1) - resumeLatch = new CountDownLatch(1) - resumedLatch = new CountDownLatch(1) - new Thread("Suspend Lock") { - override def run = { - try { - lock.writeLock().lock() - suspended.countDown() - resumeLatch.await() - } finally { - lock.writeLock().unlock(); - resumedLatch.countDown() - } - } - }.start() - suspended.await() - } - - def resume = this.synchronized { - if( resumedLatch != null ) { - resumeLatch.countDown() - resumedLatch.await(); - resumeLatch = null - resumedLatch = null - } - } - - def call[T](func: =>T):T= { - threads.incrementAndGet() - lock.readLock().lock() - try { - func - } finally { - threads.decrementAndGet() - lock.readLock().unlock() - } - } - -} - -class RecordLogTestSupport { - - val forceCall = new SuspendCallSupport() - val writeCall = new SuspendCallSupport() - val deleteCall = new SuspendCallSupport() - -} - -case class RecordLog(directory: File, logSuffix:String) { - import RecordLog._ - - directory.mkdirs() - - var logSize = 1024 * 1024 * 100L - var current_appender:LogAppender = _ - var verify_checksums = false - val log_infos = new TreeMap[Long, LogInfo]() - - var recordLogTestSupport:RecordLogTestSupport = - if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) { - new RecordLogTestSupport() - } else { - null - } - - - object log_mutex - - def delete(id:Long) = { - log_mutex.synchronized { - // We can't delete the current appender. - if( current_appender.position != id ) { - Option(log_infos.get(id)).foreach { info => - onDelete(info.file) - onDelete(id) - log_infos.remove(id) - reader_cache.synchronized { - val reader = reader_cache.remove(info.file); - if( reader!=null ) { - reader.release(); - } - } - } - } - } - } - - protected def onDelete(file:Long) = { - } - - protected def onDelete(file:File) = { - if( recordLogTestSupport!=null ) { - recordLogTestSupport.deleteCall.call { - file.delete() - } - } else { - file.delete() - } - } - - def checksum(data: Buffer): Int = { - val checksum = new CRC32 - checksum.update(data.data, data.offset, data.length) - (checksum.getValue & 0xFFFFFFFF).toInt - } - - class LogAppender(file:File, position:Long, var append_offset:Long=0L) extends LogReader(file, position) { - - val info = new LogInfo(file, position, 0) - - override def open = new RandomAccessFile(file, "rw") - - override def on_close ={ - force - } - - val flushed_offset = new AtomicLong(append_offset) - - def append_position = { - position+append_offset - } - - // set the file size ahead of time so that we don't have to sync the file - // meta-data on every log sync. 
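// A minimal sketch, not part of the original file, of the record layout documented
// above: a '*' magic byte, a kind byte, a CRC32 of the payload and the payload length
// make up the 10-byte header (LOG_HEADER_SIZE), followed by the payload itself. The
// RecordHeaderSketch/encodeRecord names are illustrative, and plain JDK streams are
// assumed here instead of the hawtbuf helpers the store actually uses.
import java.io.{ByteArrayOutputStream, DataOutputStream}
import java.util.zip.CRC32

object RecordHeaderSketch {
  // Encodes one record: a 10 byte header ('*', kind, crc32, length) followed by the payload.
  def encodeRecord(kind: Byte, data: Array[Byte]): Array[Byte] = {
    val crc = new CRC32
    crc.update(data, 0, data.length)                  // the checksum covers only the payload
    val bytes = new ByteArrayOutputStream(10 + data.length)
    val out = new DataOutputStream(bytes)
    out.writeByte('*')                                // start-of-record magic
    out.writeByte(kind)                               // content type of the payload
    out.writeInt((crc.getValue & 0xFFFFFFFFL).toInt)  // crc32 of the payload, as an unsigned int
    out.writeInt(data.length)                         // payload length
    out.write(data)                                   // then the payload itself
    out.flush()
    bytes.toByteArray
  }
}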
- if( append_offset==0 ) { - channel.position(logSize-1) - channel.write(new Buffer(1).toByteBuffer) - channel.force(true) - channel.position(0) - } - - val write_buffer = new DataByteArrayOutputStream(BUFFER_SIZE+LOG_HEADER_SIZE) - - def force = { - flush - max_log_flush_latency { - // only need to update the file metadata if the file size changes.. - if( recordLogTestSupport!=null ) { - recordLogTestSupport.forceCall.call { - channel.force(append_offset > logSize) - } - } else { - channel.force(append_offset > logSize) - } - } - } - - - def skip(length:Long) = this.synchronized { - flush - append_offset += length - flushed_offset.addAndGet(length) - } - - /** - * returns the offset position of the data record. - */ - def append(id:Byte, data: Buffer) = this.synchronized { - val record_position = append_position - val data_length = data.length - val total_length = LOG_HEADER_SIZE + data_length - - if( write_buffer.position() + total_length > BUFFER_SIZE ) { - flush - } - - val cs: Int = checksum(data) -// trace("Writing at: "+record_position+" len: "+data_length+" with checksum: "+cs) - - if( false && total_length > BYPASS_BUFFER_SIZE ) { - - // Write the header and flush.. - write_buffer.writeByte(LOG_HEADER_PREFIX) - write_buffer.writeByte(id) - write_buffer.writeInt(cs) - write_buffer.writeInt(data_length) - - append_offset += LOG_HEADER_SIZE - flush - - // Directly write the data to the channel since it's large. - val buffer = data.toByteBuffer - val pos = append_offset+LOG_HEADER_SIZE - val remaining = buffer.remaining - - if( recordLogTestSupport!=null ) { - recordLogTestSupport.writeCall.call { - channel.write(buffer, pos) - } - } else { - channel.write(buffer, pos) - } - - flushed_offset.addAndGet(remaining) - if( buffer.hasRemaining ) { - throw new IOException("Short write") - } - append_offset += data_length - - } else { - write_buffer.writeByte(LOG_HEADER_PREFIX) - write_buffer.writeByte(id) - write_buffer.writeInt(cs) - write_buffer.writeInt(data_length) - write_buffer.write(data.data, data.offset, data_length) - append_offset += total_length - } - (record_position, info) - } - - def flush = max_log_flush_latency { this.synchronized { - if( write_buffer.position() > 0 ) { - val buffer = write_buffer.toBuffer.toByteBuffer - val remaining = buffer.remaining - val pos = append_offset-remaining - - if( recordLogTestSupport!=null ) { - recordLogTestSupport.writeCall.call { - channel.write(buffer, pos) - } - } else { - channel.write(buffer, pos) - } - - flushed_offset.addAndGet(remaining) - if( buffer.hasRemaining ) { - throw new IOException("Short write") - } - write_buffer.reset() - } } - } - - override def check_read_flush(end_offset:Long) = { - if( flushed_offset.get() < end_offset ) { - flush - } - } - - } - - case class LogReader(file:File, position:Long) extends BaseRetained { - - def open = new RandomAccessFile(file, "r") - - val fd = open - val channel = fd.getChannel - - override def dispose() { - on_close - fd.close() - } - - def on_close = {} - - def check_read_flush(end_offset:Long) = {} - - def read(record_position:Long, length:Int) = { - val offset = record_position-position - assert(offset >=0 ) - - check_read_flush(offset+LOG_HEADER_SIZE+length) - - if(verify_checksums) { - - val record = new Buffer(LOG_HEADER_SIZE+length) - - def record_is_not_changing = { - using(open) { fd => - val channel = fd.getChannel - val new_record = new Buffer(LOG_HEADER_SIZE+length) - channel.read(new_record.toByteBuffer, offset) - var same = record == new_record - println(same) - same - 
} - } - - if( channel.read(record.toByteBuffer, offset) != record.length ) { - assert( record_is_not_changing ) - throw new IOException("short record at position: "+record_position+" in file: "+file+", offset: "+offset) - } - - val is = new DataByteArrayInputStream(record) - val prefix = is.readByte() - if( prefix != LOG_HEADER_PREFIX ) { - assert(record_is_not_changing) - throw new IOException("invalid record at position: "+record_position+" in file: "+file+", offset: "+offset) - } - - val id = is.readByte() - val expectedChecksum = is.readInt() - val expectedLength = is.readInt() - val data = is.readBuffer(length) - - // If your reading the whole record we can verify the data checksum - if( expectedLength == length ) { - if( expectedChecksum != checksum(data) ) { - assert(record_is_not_changing) - throw new IOException("checksum does not match at position: "+record_position+" in file: "+file+", offset: "+offset) - } - } - - data - } else { - val data = new Buffer(length) - var bb = data.toByteBuffer - var position = offset+LOG_HEADER_SIZE - while( bb.hasRemaining ) { - var count = channel.read(bb, position) - if( count == 0 ) { - throw new IOException("zero read at file '%s' offset: %d".format(file, position)) - } - if( count < 0 ) { - throw new EOFException("File '%s' offset: %d".format(file, position)) - } - position += count - } - data - } - } - - def read(record_position:Long) = { - val offset = record_position-position - val header = new Buffer(LOG_HEADER_SIZE) - check_read_flush(offset+LOG_HEADER_SIZE) - channel.read(header.toByteBuffer, offset) - val is = header.bigEndianEditor(); - val prefix = is.readByte() - if( prefix != LOG_HEADER_PREFIX ) { - // Does not look like a record. - throw new IOException("invalid record position %d (file: %s, offset: %d)".format(record_position, file.getAbsolutePath, offset)) - } - val id = is.readByte() - val expectedChecksum = is.readInt() - val length = is.readInt() - val data = new Buffer(length) - - check_read_flush(offset+LOG_HEADER_SIZE+length) - if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != length ) { - throw new IOException("short record") - } - - if(verify_checksums) { - if( expectedChecksum != checksum(data) ) { - throw new IOException("checksum does not match") - } - } - (id, data, record_position+LOG_HEADER_SIZE+length) - } - - def check(record_position:Long):Option[(Long, Option[Long])] = { - var offset = record_position-position - val header = new Buffer(LOG_HEADER_SIZE) - channel.read(header.toByteBuffer, offset) - val is = header.bigEndianEditor(); - val prefix = is.readByte() - if( prefix != LOG_HEADER_PREFIX ) { - return None // Does not look like a record. 
- } - val kind = is.readByte() - val expectedChecksum = is.readInt() - val length = is.readInt() - - val chunk = new Buffer(1024*4) - val chunkbb = chunk.toByteBuffer - offset += LOG_HEADER_SIZE - - // Read the data in in chunks to avoid - // OOME if we are checking an invalid record - // with a bad record length - val checksumer = new CRC32 - var remaining = length - while( remaining > 0 ) { - val chunkSize = remaining.min(1024*4); - chunkbb.position(0) - chunkbb.limit(chunkSize) - channel.read(chunkbb, offset) - if( chunkbb.hasRemaining ) { - return None - } - checksumer.update(chunk.data, 0, chunkSize) - offset += chunkSize - remaining -= chunkSize - } - - val checksum = ( checksumer.getValue & 0xFFFFFFFF).toInt - if( expectedChecksum != checksum ) { - return None - } - val uow_start_pos = if(kind == UOW_END_RECORD && length==8) Some(decode_long(chunk)) else None - return Some(record_position+LOG_HEADER_SIZE+length, uow_start_pos) - } - - def verifyAndGetEndOffset:Long = { - var pos = position; - var current_uow_start = pos - val limit = position+channel.size() - while(pos < limit) { - check(pos) match { - case Some((next, uow_start_pos)) => - uow_start_pos.foreach { uow_start_pos => - if( uow_start_pos == current_uow_start ) { - current_uow_start = next - } else { - return current_uow_start-position - } - } - pos = next - case None => - return current_uow_start-position - } - } - return current_uow_start-position - } - } - - def create_log_appender(position: Long, offset:Long) = { - new LogAppender(next_log(position), position, offset) - } - - def create_appender(position: Long, offset:Long): Any = { - log_mutex.synchronized { - if(current_appender!=null) { - log_infos.put (position, new LogInfo(current_appender.file, current_appender.position, current_appender.append_offset)) - } - current_appender = create_log_appender(position, offset) - log_infos.put(position, new LogInfo(current_appender.file, position, 0)) - } - } - - val max_log_write_latency = TimeMetric() - val max_log_flush_latency = TimeMetric() - val max_log_rotate_latency = TimeMetric() - - def open(appender_size:Long= -1) = { - log_mutex.synchronized { - log_infos.clear() - LevelDBClient.find_sequence_files(directory, logSuffix).foreach { case (position,file) => - log_infos.put(position, LogInfo(file, position, file.length())) - } - - if( log_infos.isEmpty ) { - create_appender(0,0) - } else { - val file = log_infos.lastEntry().getValue - if( appender_size == -1 ) { - val r = LogReader(file.file, file.position) - try { - val endOffset = r.verifyAndGetEndOffset - using(new RandomAccessFile(file.file, "rw")) { file=> - try { - file.getChannel.truncate(endOffset) - } - catch { - case e:Throwable => - e.printStackTrace() - } - file.getChannel.force(true) - } - create_appender(file.position,endOffset) - } finally { - r.release() - } - } else { - create_appender(file.position,appender_size) - } - } - } - } - - def isOpen = { - log_mutex.synchronized { - current_appender!=null; - } - } - - def close = { - log_mutex.synchronized { - if( current_appender!=null ) { - current_appender.release - } - } - } - - def appender_limit = current_appender.append_position - def appender_start = current_appender.position - - def next_log(position:Long) = LevelDBClient.create_sequence_file(directory, position, logSuffix) - - def appender[T](func: (LogAppender)=>T):T= { - val intial_position = current_appender.append_position - try { - max_log_write_latency { - val rc = func(current_appender) - if( current_appender.append_position != 
intial_position ) { - // Record a UOW_END_RECORD so that on recovery we only replay full units of work. - current_appender.append(UOW_END_RECORD,encode_long(intial_position)) - } - rc - } - } finally { - current_appender.flush - max_log_rotate_latency { - log_mutex.synchronized { - if ( current_appender.append_offset >= logSize ) { - rotate - } - } - } - } - } - - - def rotate[T] = log_mutex.synchronized { - current_appender.release() - on_log_rotate() - create_appender(current_appender.append_position, 0) - } - - var on_log_rotate: ()=>Unit = ()=>{} - - private val reader_cache = new LRUCache[File, LogReader](100) { - protected override def onCacheEviction(entry: Entry[File, LogReader]) = { - entry.getValue.release() - } - } - - def log_info(pos:Long) = log_mutex.synchronized { Option(log_infos.floorEntry(pos)).map(_.getValue) } - - def log_file_positions = log_mutex.synchronized { - import collection.JavaConversions._ - log_infos.map(_._2.position).toArray - } - - private def get_reader[T](record_position:Long)(func: (LogReader)=>T):Option[T] = { - - val (info, appender) = log_mutex.synchronized { - log_info(record_position) match { - case None => - warn("No reader available for position: %x, log_infos: %s", record_position, log_infos) - return None - case Some(info) => - if(info.position == current_appender.position) { - current_appender.retain() - (info, current_appender) - } else { - (info, null) - } - } - } - - val reader = if( appender!=null ) { - // read from the current appender. - appender - } else { - // Checkout a reader from the cache... - reader_cache.synchronized { - var reader = reader_cache.get(info.file) - if(reader==null) { - reader = LogReader(info.file, info.position) - reader_cache.put(info.file, reader) - } - reader.retain() - reader - } - } - - try { - Some(func(reader)) - } finally { - reader.release - } - } - - def read(pos:Long) = { - get_reader(pos)(_.read(pos)) - } - def read(pos:Long, length:Int) = { - get_reader(pos)(_.read(pos, length)) - } - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ElectingLevelDBStore.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ElectingLevelDBStore.scala deleted file mode 100644 index a47baabe1f..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ElectingLevelDBStore.scala +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.linkedin.util.clock.Timespan -import scala.beans.BeanProperty -import org.apache.activemq.util.ServiceStopper -import org.apache.activemq.leveldb.{LevelDBClient, RecordLog, LevelDBStore} -import java.net.{NetworkInterface, InetAddress} -import org.fusesource.hawtdispatch._ -import org.apache.activemq.broker.{LockableServiceSupport, Locker} -import org.apache.activemq.store.PersistenceAdapter -import java.util.concurrent.CountDownLatch -import java.util.concurrent.atomic.AtomicBoolean -import org.apache.activemq.leveldb.util.Log -import java.io.File -import org.apache.activemq.usage.SystemUsage -import org.apache.activemq.ActiveMQMessageAuditNoSync -import org.apache.activemq.broker.jmx.{OpenTypeSupport, BrokerMBeanSupport, AnnotatedMBean} -import javax.management.ObjectName -import javax.management.openmbean.{CompositeDataSupport, SimpleType, CompositeData} -import java.util -import org.apache.activemq.leveldb.replicated.groups._ - -object ElectingLevelDBStore extends Log { - - def machine_hostname: String = { - import collection.JavaConversions._ - // Get the host name of the first non loop-back interface.. - for (interface <- NetworkInterface.getNetworkInterfaces; if (!interface.isLoopback); inet <- interface.getInetAddresses) { - var address = inet.getHostAddress - var name = inet.getCanonicalHostName - if( address!= name ) { - return name - } - } - // Or else just go the simple route. - return InetAddress.getLocalHost.getCanonicalHostName; - } - -} - -/** - * - */ -class ElectingLevelDBStore extends ProxyLevelDBStore { - import ElectingLevelDBStore._ - - def proxy_target = master - - @BeanProperty - var zkAddress = "127.0.0.1:2181" - @BeanProperty - var zkPassword:String = _ - @BeanProperty - var zkPath = "/default" - @BeanProperty - var zkSessionTimeout = "2s" - - var brokerName: String = _ - - @BeanProperty - var container: String = _ - - @BeanProperty - var hostname: String = _ - - @BeanProperty - var connectUrl: String = _ - - @BeanProperty - var bind = "tcp://0.0.0.0:61619" - - @BeanProperty - var weight = 1 - @BeanProperty - var replicas = 3 - @BeanProperty - var sync="quorum_mem" - - def clusterSizeQuorum = (replicas/2) + 1 - - @BeanProperty - var securityToken = "" - - var directory = LevelDBStore.DEFAULT_DIRECTORY; - override def setDirectory(dir: File) { - directory = dir - } - override def getDirectory: File = { - return directory - } - - @BeanProperty - var logSize: Long = 1024 * 1024 * 100 - @BeanProperty - var indexFactory: String = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory" - @BeanProperty - var verifyChecksums: Boolean = false - @BeanProperty - var indexMaxOpenFiles: Int = 1000 - @BeanProperty - var indexBlockRestartInterval: Int = 16 - @BeanProperty - var paranoidChecks: Boolean = false - @BeanProperty - var indexWriteBufferSize: Int = 1024 * 1024 * 6 - @BeanProperty - var indexBlockSize: Int = 4 * 1024 - @BeanProperty - var indexCompression: String = "snappy" - @BeanProperty - var logCompression: String = "none" - @BeanProperty - var indexCacheSize: Long = 1024 * 1024 * 256L - @BeanProperty - var flushDelay = 0 - @BeanProperty - var asyncBufferSize = 1024 * 1024 * 4 - @BeanProperty - var monitorStats = false - @BeanProperty - var failoverProducersAuditDepth = ActiveMQMessageAuditNoSync.DEFAULT_WINDOW_SIZE; - @BeanProperty - var maxFailoverProducersToTrack = ActiveMQMessageAuditNoSync.MAXIMUM_PRODUCER_COUNT; - - var master: MasterLevelDBStore = _ - var slave: 
SlaveLevelDBStore = _ - - var zk_client: ZKClient = _ - var zk_group: ZooKeeperGroup = _ - - var position: Long = -1L - - - override def toString: String = { - return "Replicated LevelDB[%s, %s/%s]".format(directory.getAbsolutePath, zkAddress, zkPath) - } - - var usageManager: SystemUsage = _ - override def setUsageManager(usageManager: SystemUsage) { - this.usageManager = usageManager - } - - def node_id = ReplicatedLevelDBStoreTrait.node_id(directory) - - def init() { - - if(brokerService!=null && brokerService.isUseJmx){ - try { - AnnotatedMBean.registerMBean(brokerService.getManagementContext, new ReplicatedLevelDBStoreView(this), objectName) - } catch { - case e: Throwable => { - warn(e, "PersistenceAdapterReplication could not be registered in JMX: " + e.getMessage) - } - } - } - - // Figure out our position in the store. - directory.mkdirs() - val log = new RecordLog(directory, LevelDBClient.LOG_SUFFIX) - log.logSize = logSize - log.open() - position = try { - log.current_appender.append_position - } finally { - log.close - } - - zk_client = new ZKClient(zkAddress, Timespan.parse(zkSessionTimeout), null) - if( zkPassword!=null ) { - zk_client.setPassword(zkPassword) - } - zk_client.start - zk_client.waitForConnected(Timespan.parse("30s")) - - zk_group = ZooKeeperGroupFactory.create(zk_client, zkPath) - val master_elector = new MasterElector(this) - debug("Starting ZooKeeper group monitor") - master_elector.start(zk_group) - debug("Joining ZooKeeper group") - master_elector.join - - this.setUseLock(true) - this.setLocker(createDefaultLocker()) - } - - def createDefaultLocker(): Locker = new Locker { - - def setLockable(lockable: LockableServiceSupport) {} - def configure(persistenceAdapter: PersistenceAdapter) {} - def setFailIfLocked(failIfLocked: Boolean) {} - def setLockAcquireSleepInterval(lockAcquireSleepInterval: Long) {} - def setName(name: String) {} - - def start() = { - master_started_latch.await() - } - - def keepAlive(): Boolean = { - master_started.get() - } - - def stop() {} - } - - - val master_started_latch = new CountDownLatch(1) - val master_started = new AtomicBoolean(false) - - def start_master(func: (Int) => Unit) = { - assert(master==null) - master = create_master() - master_started.set(true) - master.blocking_executor.execute(^{ - master.start(); - master_stopped.set(false) - master_started_latch.countDown() - }) - master.blocking_executor.execute(^{ - func(master.getPort) - }) - } - - def isMaster = master_started.get() && !master_stopped.get() - - val stopped_latch = new CountDownLatch(1) - val master_stopped = new AtomicBoolean(false) - - def stop_master(func: => Unit) = { - assert(master!=null) - master.blocking_executor.execute(^{ - master.stop(); - master_stopped.set(true) - position = master.wal_append_position - stopped_latch.countDown() - master = null - func - }) - master.blocking_executor.execute(^{ - val broker = brokerService - if( broker!=null ) { - try { - broker.requestRestart(); - broker.stop(); - } catch { - case e:Exception=> warn("Failure occurred while restarting the broker", e); - } - } - }) - } - - def objectName = { - var objectNameStr = BrokerMBeanSupport.createPersistenceAdapterName(brokerService.getBrokerObjectName.toString, "LevelDB[" + directory.getAbsolutePath + "]").toString - objectNameStr += "," + "view=Replication"; - new ObjectName(objectNameStr); - } - - protected def doStart() = { - master_started_latch.await() - } - - protected def doStop(stopper: ServiceStopper) { - if(brokerService!=null && brokerService.isUseJmx){ - 
brokerService.getManagementContext().unregisterMBean(objectName); - } - if (zk_group != null) { - zk_group.close - zk_group = null - } - if (zk_client != null) { - zk_client.close() - zk_client = null - } - - if( master!=null ) { - val latch = new CountDownLatch(1) - stop_master { - latch.countDown() - } - latch.await() - } - if( slave !=null ) { - val latch = new CountDownLatch(1) - stop_slave { - latch.countDown() - } - latch.await() - - } - if( master_started.get() ) { - stopped_latch.countDown() - } - } - - def start_slave(address: String)(func: => Unit) = { - assert(master==null) - slave = create_slave() - slave.connect = address - slave.blocking_executor.execute(^{ - slave.start(); - func - }) - } - - def stop_slave(func: => Unit) = { - if( slave!=null ) { - val s = slave - slave = null - s.blocking_executor.execute(^{ - s.stop(); - position = s.wal_append_position - func - }) - } - } - - def create_slave() = { - val slave = new SlaveLevelDBStore(); - configure(slave) - slave - } - - def create_master() = { - val master = new MasterLevelDBStore - configure(master) - master.replicas = replicas - master.bind = bind - master.syncTo = sync - master - } - - override def setBrokerName(brokerName: String): Unit = { - this.brokerName = brokerName - } - - - override def deleteAllMessages { - if(proxy_target != null) proxy_target.deleteAllMessages - else { - info("You instructed the broker to delete all messages (on startup?). " + - "Cannot delete all messages from an ElectingLevelDBStore because we need to decide who the master is first") - } - } - - def configure(store: ReplicatedLevelDBStoreTrait) { - store.directory = directory - store.indexFactory = indexFactory - store.verifyChecksums = verifyChecksums - store.indexMaxOpenFiles = indexMaxOpenFiles - store.indexBlockRestartInterval = indexBlockRestartInterval - store.paranoidChecks = paranoidChecks - store.indexWriteBufferSize = indexWriteBufferSize - store.indexBlockSize = indexBlockSize - store.indexCompression = indexCompression - store.logCompression = logCompression - store.indexCacheSize = indexCacheSize - store.flushDelay = flushDelay - store.asyncBufferSize = asyncBufferSize - store.monitorStats = monitorStats - store.securityToken = securityToken - store.setFailoverProducersAuditDepth(failoverProducersAuditDepth) - store.setMaxFailoverProducersToTrack(maxFailoverProducersToTrack) - store.setBrokerName(brokerName) - store.setBrokerService(brokerService) - store.setUsageManager(usageManager) - } - - def address(port: Int) = { - if( connectUrl==null ) { - if (hostname == null) { - hostname = machine_hostname - } - "tcp://" + hostname + ":" + port - } else { - connectUrl; - } - } - - override def size: Long = { - if( master !=null ) { - master.size - } else if( slave !=null ) { - slave.size - } else { - var rc = 0L - if( directory.exists() ) { - for( f <- directory.list() ) { - if( f.endsWith(LevelDBClient.LOG_SUFFIX)) { - rc += f.length - } - } - } - rc - } - } -} - - -class ReplicatedLevelDBStoreView(val store:ElectingLevelDBStore) extends ReplicatedLevelDBStoreViewMBean { - import store._ - - def getZkAddress = zkAddress - def getZkPath = zkPath - def getZkSessionTimeout = zkSessionTimeout - def getBind = bind - def getReplicas = replicas - - def getNodeRole:String = { - if( slave!=null ) { - return "slave" - } - if( master!=null ) { - return "master" - } - "electing" - } - - def getStatus:String = { - if( slave!=null ) { - return slave.status - } - if( master!=null ) { - return master.status - } - "" - } - - object SlaveStatusOTF 
extends OpenTypeSupport.AbstractOpenTypeFactory { - protected def getTypeName: String = classOf[SlaveStatus].getName - - protected override def init() = { - super.init(); - addItem("nodeId", "nodeId", SimpleType.STRING); - addItem("remoteAddress", "remoteAddress", SimpleType.STRING); - addItem("attached", "attached", SimpleType.BOOLEAN); - addItem("position", "position", SimpleType.LONG); - } - - override def getFields(o: Any): util.Map[String, AnyRef] = { - val status = o.asInstanceOf[SlaveStatus] - val rc = super.getFields(o); - rc.put("nodeId", status.nodeId); - rc.put("remoteAddress", status.remoteAddress); - rc.put("attached", status.attached.asInstanceOf[java.lang.Boolean]); - rc.put("position", status.position.asInstanceOf[java.lang.Long]); - rc - } - } - - def getSlaves():Array[CompositeData] = { - if( master!=null ) { - master.slaves_status.map { status => - val fields = SlaveStatusOTF.getFields(status); - new CompositeDataSupport(SlaveStatusOTF.getCompositeType(), fields).asInstanceOf[CompositeData] - }.toArray - } else { - Array() - } - } - - def getPosition:java.lang.Long = { - if( slave!=null ) { - return new java.lang.Long(slave.wal_append_position) - } - if( master!=null ) { - return new java.lang.Long(master.wal_append_position) - } - null - } - - def getPositionDate:java.lang.Long = { - val rc = if( slave!=null ) { - slave.wal_date - } else if( master!=null ) { - master.wal_date - } else { - 0 - } - if( rc != 0 ) { - return new java.lang.Long(rc) - } else { - return null - } - } - - def getDirectory = directory.getCanonicalPath - def getSync = sync - - def getNodeId: String = node_id -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterElector.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterElector.scala deleted file mode 100644 index 8400e33f45..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterElector.scala +++ /dev/null @@ -1,247 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.apache.activemq.leveldb.replicated.groups._ -import com.fasterxml.jackson.annotation.JsonProperty -import org.apache.activemq.leveldb.util.{Log, JsonCodec} -import java.io.IOException - - -class LevelDBNodeState extends NodeState { - - @JsonProperty - var id: String = _ - - @JsonProperty - var container: String = _ - - @JsonProperty - var address: String = _ - - @JsonProperty - var position: Long = -1 - - @JsonProperty - var weight: Int = 0 - - @JsonProperty - var elected: String = _ - - override def equals(obj: Any): Boolean = { - obj match { - case x:LevelDBNodeState => - x.id == id && - x.container == container && - x.address == address && - x.position == position && - x.elected == elected - case _ => false - } - } - - override - def toString = JsonCodec.encode(this).ascii().toString - -} - -object MasterElector extends Log - -/** - */ -class MasterElector(store: ElectingLevelDBStore) extends ClusteredSingleton[LevelDBNodeState](classOf[LevelDBNodeState]) { - - import MasterElector._ - - var last_state: LevelDBNodeState = _ - var elected: String = _ - var position: Long = -1 - var address: String = _ - var updating_store = false - var next_connect: String = _ - var connected_address: String = _ - - def join: Unit = this.synchronized { - last_state = create_state - join(last_state) - add(change_listener) - } - - def elector = this - - def update: Unit = elector.synchronized { - var next = create_state - if (next != last_state) { - last_state = next - join(next) - } - } - - def create_state = { - val rc = new LevelDBNodeState - rc.id = store.brokerName - rc.elected = elected - rc.position = position - rc.weight = store.weight - rc.address = address - rc.container = store.container - rc.address = address - rc - } - - object change_listener extends ChangeListener { - - def connected = changed - def disconnected = { - changed - } - - var stopped = false; - def changed:Unit = elector.synchronized { - debug("ZooKeeper group changed: %s", members) - -// info(eid+" cluster state changed: "+members) - if (isMaster) { - // We are the master elector, we will choose which node will startup the MasterLevelDBStore - members.get(store.brokerName) match { - case None => - info("Not enough cluster members connected to elect a new master.") - case Some(members) => - - if (members.size > store.replicas) { - warn("Too many cluster members are connected. Expected at most "+store.replicas+ - " members but there are "+members.size+" connected.") - } - if (members.size < store.clusterSizeQuorum) { - info("Not enough cluster members connected to elect a master.") - elected = null - } else { - - // If we already elected a master, lets make sure he is still online.. - if (elected != null) { - val by_eid = Map(members: _*) - if (by_eid.get(elected).isEmpty) { - info("Previously elected master is not online, staring new election") - elected = null - } - } - - // Do we need to elect a new master? - if (elected == null) { - // Find the member with the most updates. - val sortedMembers = members.filter(_._2.position >= 0).sortWith { - (a, b) => { - a._2.position > b._2.position || - (a._2.position == b._2.position && a._2.weight > b._2.weight ) - } - } - if (sortedMembers.size != members.size) { - info("Not enough cluster members have reported their update positions yet.") - } else { - // We now have an election. - elected = sortedMembers.head._1 - } - } - // Sort by the positions in the cluster.. 
- } - } - } else { - // Only the master sets the elected field. - elected = null - } - - val master_elected = if(eid==null) null else master.map(_.elected).getOrElse(null) - - // If no master is currently elected, we need to report our current store position. - // Since that will be used to select the master. - val connect_target = if (master_elected != null) { - position = -1 - members.get(store.brokerName).get.find(_._1 == master_elected).map(_._2.address).getOrElse(null) - } else { - // Once we are not running a master or server, report the position.. - if( connected_address==null && address==null && !updating_store ) { - position = store.position - } - null - } - - // Do we need to stop the running master? - if ((eid==null || master_elected != eid) && address!=null && !updating_store) { - info("Demoted to slave") - updating_store = true - store.stop_master { - elector.synchronized { - updating_store = false - info("Master stopped") - address = null - changed - } - } - } - - // Have we been promoted to being the master? - if (eid!=null && master_elected == eid && address==null && !updating_store ) { - info("Promoted to master") - updating_store = true - store.start_master { port => - elector.synchronized { - updating_store = false - address = store.address(port) - info("Master started: "+address) - changed - } - } - } - - // Can we become a slave? - if ( (eid==null || master_elected != eid) && address == null) { - // Did the master address change? - if (connect_target != connected_address) { - - // Do we need to setup a new slave. - if (connect_target != null && !updating_store) { - updating_store = true - store.start_slave(connect_target) { - elector.synchronized { - updating_store=false - info("Slave started") - connected_address = connect_target - changed - } - } - } - - // Lets stop the slave.. - if (connect_target == null && !updating_store) { - updating_store = true - store.stop_slave { - elector.synchronized { - updating_store=false - info("Slave stopped") - connected_address = null - changed - } - } - } - } - } - if( group.zk.isConnected ) { - update - } - } - } -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBClient.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBClient.scala deleted file mode 100644 index 8813e6c1e7..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBClient.scala +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - - -import org.apache.activemq.leveldb.util._ - -import FileSupport._ -import java.io._ -import org.apache.activemq.leveldb.{RecordLog, LevelDBClient} -import java.util -import org.apache.activemq.leveldb.replicated.dto.{SyncResponse, FileInfo} - -/** - * @author Hiram Chirino - */ -object MasterLevelDBClient extends Log { - - val MANIFEST_SUFFIX = ".mf" - val LOG_SUFFIX = LevelDBClient.LOG_SUFFIX - val INDEX_SUFFIX = LevelDBClient.INDEX_SUFFIX - -} - -/** - * - * @author Hiram Chirino - */ -class MasterLevelDBClient(val store:MasterLevelDBStore) extends LevelDBClient(store) { - import MasterLevelDBClient._ - import collection.JavaConversions._ - - var snapshots_pending_delete = new util.TreeSet[Long]() - - def slave_held_snapshots = { - val rc = new util.HashSet[Long]() - for( v <- store.slaves.values() ; s <- v.held_snapshot ) { - rc.add(s) - } - rc - } - - override def replaceLatestSnapshotDirectory(newSnapshotIndexPos: Long) { - if( slave_held_snapshots.contains(lastIndexSnapshotPos) ) { - // if a slave is holding open a snapshot.. let's not delete its data just yet... - snapshots_pending_delete.add(newSnapshotIndexPos) - lastIndexSnapshotPos = newSnapshotIndexPos - } else { - super.replaceLatestSnapshotDirectory(newSnapshotIndexPos) - } - } - - override def gc(topicPositions: Seq[(Long, Long)]) { - val snapshots_to_rm = new util.HashSet(snapshots_pending_delete) - snapshots_to_rm.removeAll(slave_held_snapshots); - - for ( snapshot <- snapshots_to_rm ) { - snapshotIndexFile(snapshot).recursiveDelete - } - super.gc(topicPositions) - } - - override def oldest_retained_snapshot: Long = { - if ( snapshots_pending_delete.isEmpty ) { - super.oldest_retained_snapshot - } else { - snapshots_pending_delete.first() - } - } - - def snapshot_state(snapshot_id:Long) = { - def info(file:File) = { - val rc = new FileInfo - rc.file = file.getName - rc.length = file.length() - rc - } - - val rc = new SyncResponse - rc.snapshot_position = snapshot_id - rc.wal_append_position = log.current_appender.append_position - - for( file <- logDirectory.listFiles; if file.getName.endsWith(LOG_SUFFIX) ) { - // Only need to sync up to what's been flushed. - val fileInfo = info(file) - if( log.current_appender.file == file ) { - rc.append_log = file.getName - fileInfo.length = log.current_appender.flushed_offset.get() - fileInfo.crc32 = file.crc32(fileInfo.length) - } else { - fileInfo.crc32 = file.cached_crc32 - } - rc.log_files.add(fileInfo) - } - - val index_dir = LevelDBClient.create_sequence_file(directory, snapshot_id, INDEX_SUFFIX) - if( index_dir.exists() ) { - for( file <- index_dir.listFiles ) { - val name = file.getName - if( name !="LOCK" ) { - rc.index_files.add(info(file)) - } - } - } - - rc - } - - - // Override the log appender implementation so that it - // stores the logs on the local and remote file systems.
- override def createLog = new RecordLog(directory, LOG_SUFFIX) { - - override def create_log_appender(position: Long, offset:Long) = { - new LogAppender(next_log(position), position, offset) { - - val file_name = file.getName - - override def flush = this.synchronized { - val offset = flushed_offset.get() - super.flush - val length = flushed_offset.get() - offset; - store.replicate_wal(file, position, offset, length) - } - - override def force = { - import MasterLevelDBStore._ - if( (store.syncToMask & SYNC_TO_DISK) != 0) { - super.force - } - if( (store.syncToMask & SYNC_TO_REMOTE) != 0) { - flush - store.wal_sync_to(position+flushed_offset.get()) - } - } - - override def on_close { - super.force - } - } - } - - override protected def onDelete(file: Long) = { - super.onDelete(file) - store.replicate_log_delete(file) - } - } -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBStore.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBStore.scala deleted file mode 100644 index 00f16ba350..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/MasterLevelDBStore.scala +++ /dev/null @@ -1,468 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.apache.activemq.leveldb.LevelDBStore -import org.apache.activemq.util.ServiceStopper -import org.apache.activemq.leveldb.util.FileSupport._ -import org.apache.activemq.leveldb.util.{JsonCodec, Log} -import org.fusesource.hawtdispatch._ -import org.apache.activemq.leveldb.replicated.dto._ -import org.fusesource.hawtdispatch.transport._ -import java.util.concurrent._ -import java.io.{IOException, File} -import java.net.{SocketAddress, InetSocketAddress, URI} -import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong} -import scala.beans.BeanProperty -import org.fusesource.hawtbuf.{Buffer, AsciiBuffer} - -class PositionSync(val position:Long, count:Int) extends CountDownLatch(count) - -object MasterLevelDBStore extends Log { - - val SYNC_TO_DISK = 0x01 - val SYNC_TO_REMOTE = 0x02 - val SYNC_TO_REMOTE_MEMORY = 0x04 | SYNC_TO_REMOTE - val SYNC_TO_REMOTE_DISK = 0x08 | SYNC_TO_REMOTE - -} - -case class SlaveStatus(nodeId:String, remoteAddress:String, attached:Boolean, position:Long) - -/** - */ -class MasterLevelDBStore extends LevelDBStore with ReplicatedLevelDBStoreTrait { - - import MasterLevelDBStore._ - import collection.JavaConversions._ - import ReplicationSupport._ - - @BeanProperty - var bind = "tcp://0.0.0.0:61619" - - @BeanProperty - var replicas = 3 - def minSlaveAcks = replicas/2 - - var _syncTo="quorum_mem" - var syncToMask=SYNC_TO_REMOTE_MEMORY - - @BeanProperty - def syncTo = _syncTo - @BeanProperty - def syncTo_=(value:String) { - _syncTo = value - syncToMask = 0 - for( v <- value.split(",").map(_.trim.toLowerCase) ) { - v match { - case "" => - case "local_mem" => - case "local_disk" => syncToMask |= SYNC_TO_DISK - case "remote_mem" => syncToMask |= SYNC_TO_REMOTE_MEMORY - case "remote_disk" => syncToMask |= SYNC_TO_REMOTE_DISK - case "quorum_mem" => syncToMask |= SYNC_TO_REMOTE_MEMORY - case "quorum_disk" => syncToMask |= SYNC_TO_REMOTE_DISK | SYNC_TO_DISK - case x => warn("Unknown syncTo value: [%s]", x) - } - } - } - - val slaves = new ConcurrentHashMap[String,SlaveState]() - - def slaves_status = slaves.values().map(_.status) - - def status = { - var caughtUpCounter = 0 - var notCaughtUpCounter = 0 - for( slave <- slaves.values() ) { - if( slave.isCaughtUp ) { - caughtUpCounter += 1 - } else { - notCaughtUpCounter += 1 - } - } - var rc = "" - if( notCaughtUpCounter > 0 ) { - rc += "%d slave nodes attaching. ".format(notCaughtUpCounter) - } - if( caughtUpCounter > 0 ) { - rc += "%d slave nodes attached. ".format(caughtUpCounter) - } - rc - } - - override def doStart = { - unstash(directory) - super.doStart - start_protocol_server - // Lets not complete the startup until at least one slave is synced up. 
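// A minimal sketch, not part of the original file, of the quorum wait that a
// PositionSync-style latch enables: the master records the log position it wants
// replicated, every slave ack that reaches that position counts the latch down,
// and the writer blocks until minSlaveAcks slaves have acknowledged it. The
// QuorumSync/onReplicaAck names below are illustrative assumptions.
import java.util.concurrent.{CountDownLatch, TimeUnit}

// The latch carries the position it guards; minAcks slaves must confirm it.
class QuorumSync(val position: Long, minAcks: Int) extends CountDownLatch(minAcks)

object QuorumSyncSketch {
  // Called whenever a slave reports the highest log position it has stored.
  def onReplicaAck(sync: QuorumSync, ackedPosition: Long): Unit =
    if (ackedPosition >= sync.position) sync.countDown()

  def main(args: Array[String]): Unit = {
    val sync = new QuorumSync(100L, 2)    // require acks from two slaves
    onReplicaAck(sync, 150L)              // slave A is already past the target position
    onReplicaAck(sync, 90L)               // slave B is still behind, no count down
    onReplicaAck(sync, 120L)              // slave C reaches the target: quorum met
    val reached = sync.await(1, TimeUnit.SECONDS)
    println("quorum reached: " + reached) // prints: quorum reached: true
  }
}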
- wal_sync_to(wal_append_position) - } - - override def doStop(stopper: ServiceStopper): Unit = { - if( transport_server!=null ) { - stop_protocol_server - transport_server = null - } - super.doStop(stopper) - } - - override def createClient = new MasterLevelDBClient(this) - def master_client = client.asInstanceOf[MasterLevelDBClient] - - ////////////////////////////////////// - // Replication Protocol Stuff - ////////////////////////////////////// - var transport_server:TransportServer = _ - val start_latch = new CountDownLatch(1) - - def start_protocol_server = { - transport_server = new TcpTransportServer(new URI(bind)) - transport_server.setBlockingExecutor(blocking_executor) - transport_server.setDispatchQueue(createQueue("master: "+node_id)) - transport_server.setTransportServerListener(new TransportServerListener(){ - def onAccept(transport: Transport) { - transport.setDispatchQueue(createQueue("connection from "+transport.getRemoteAddress)) - transport.setBlockingExecutor(blocking_executor) - new Session(transport).start - - } - def onAcceptError(error: Exception) { - warn(error) - } - }) - transport_server.start(^{ - start_latch.countDown() - }) - start_latch.await() - } - - def getPort = { - start_latch.await() - transport_server.getSocketAddress.asInstanceOf[InetSocketAddress].getPort - } - - def stop_protocol_server = { - transport_server.stop(NOOP) - } - - class Session(transport: Transport) extends TransportHandler(transport) { - - var login:Login = _ - var slave_state:SlaveState = _ - var disconnected = false - - def queue = transport.getDispatchQueue - - override def onTransportFailure(error: IOException) { - if( !disconnected ) { - warn("Unexpected session error: "+error) - } - super.onTransportFailure(error) - } - - def onTransportCommand(command: Any) = { - command match { - case command:ReplicationFrame => - command.action match { - case LOGIN_ACTION => - handle_login(JsonCodec.decode(command.body, classOf[Login])) - case SYNC_ACTION => - handle_sync() - case GET_ACTION => - handle_get(JsonCodec.decode(command.body, classOf[Transfer])) - case ACK_ACTION => - handle_ack(JsonCodec.decode(command.body, classOf[WalAck])) - case DISCONNECT_ACTION => - handle_disconnect() - case _ => - sendError("Unknown frame action: "+command.action) - } - } - } - - def handle_login(request:Login):Unit = { - if( request.security_token != securityToken ) { - sendError("Invalid security_token"); - } else { - login = request; - sendOk(null) - } - } - - override def onTransportDisconnected() { - val slave_state = this.slave_state; - if( slave_state !=null ) { - this.slave_state=null - if( slave_state.stop(this) && isStarted ) { - slaves.remove(slave_state.slave_id, slave_state) - } - } - } - - def handle_disconnect():Unit = { - disconnected = true; - sendOk(null) - } - - def handle_sync():Unit = { - if( login == null ) { - sendError("Not logged in") - return; - } - debug("handle_sync") - slave_state = slaves.get(login.node_id) - if ( slave_state == null ) { - slave_state = new SlaveState(login.node_id) - slaves.put(login.node_id, slave_state) - } - slave_state.start(Session.this) - } - - def handle_ack(req:WalAck):Unit = { - if( login == null || slave_state == null) { - return; - } - trace("%s: Got WAL ack, position: %d, from: %s", directory, req.position, slave_state.slave_id) - slave_state.position_update(req.position) - } - - def handle_get(req:Transfer):Unit = { - if( login == null ) { - sendError("Not logged in") - return; - } - - val file = if( req.file.startsWith("log/" ) ) { - 
client.logDirectory / req.file.stripPrefix("log/") - } else { - client.directory / req.file - } - - if( !file.exists() ) { - sendError("file does not exist") - return - } - val length = file.length() - - if( req.offset > length ) { - sendError("Invalid offset") - return - } - if( req.offset+req.length > length ) { - sendError("Invalid length") - } - sendOk(null) - send(new FileTransferFrame(file, req.offset, req.length)) - } - - } - - class SlaveState(val slave_id:String) { - - var held_snapshot:Option[Long] = None - var session:Session = _ - var position = new AtomicLong(0) - var caughtUp = new AtomicBoolean(false) - var socketAddress:SocketAddress = _ - - def start(session:Session) = { - debug("SlaveState:start") - socketAddress = session.transport.getRemoteAddress - session.queue.setLabel(transport_server.getDispatchQueue.getLabel+" -> "+slave_id) - - val resp = this.synchronized { - if( this.session!=null ) { - this.session.transport.stop(NOOP) - } - - this.session = session - val snapshot_id = client.lastIndexSnapshotPos - held_snapshot = Option(snapshot_id) - position.set(0) - master_client.snapshot_state(snapshot_id) - } - info("Slave has connected: "+slave_id) - session.queue { - session.sendOk(resp) - } - } - - def stop(session:Session) = { - this.synchronized { - if( this.session == session ) { - info("Slave has disconnected: "+slave_id) - true - } else { - false - } - } - } - - def queue(func: (Session)=>Unit) = { - val h = this.synchronized { - session - } - if( h !=null ) { - h.queue { - func(session) - } - } - } - - def replicate(value:LogDelete):Unit = { - val frame = new ReplicationFrame(LOG_DELETE_ACTION, JsonCodec.encode(value)) - queue { session => - session.send(frame) - } - } - - var unflushed_replication_frame:DeferredReplicationFrame = null - - class DeferredReplicationFrame(file:File, val position:Long, _offset:Long, initialLength:Long) extends ReplicationFrame(WAL_ACTION, null) { - val fileTransferFrame = new FileTransferFrame(file, _offset, initialLength) - var encoded:Buffer = null - - def offset = fileTransferFrame.offset - def length = fileTransferFrame.length - - override def body: Buffer = { - if( encoded==null ) { - val value = new LogWrite - value.file = position; - value.offset = offset; - value.sync = (syncToMask & SYNC_TO_REMOTE_DISK)!=0 - value.length = fileTransferFrame.length - value.date = date - encoded = JsonCodec.encode(value) - } - encoded - } - } - - def replicate(file:File, position:Long, offset:Long, length:Long):Unit = { - queue { session => - - // Check to see if we can merge the replication event /w the previous event.. - if( unflushed_replication_frame == null || - unflushed_replication_frame.position!=position || - (unflushed_replication_frame.offset+unflushed_replication_frame.length)!=offset ) { - - // We could not merge the replication event /w the previous event.. - val frame = new DeferredReplicationFrame(file, position, offset, length) - unflushed_replication_frame = frame - session.send(frame, ()=>{ - trace("%s: Sent WAL update: (file:%s, offset: %d, length: %d) to %s", directory, file, frame.offset, frame.length, slave_id) - if( unflushed_replication_frame eq frame ) { - unflushed_replication_frame = null - } - }) - session.send(frame.fileTransferFrame) - - } else { - // We were able to merge.. yay! 
- assert(unflushed_replication_frame.encoded == null) - unflushed_replication_frame.fileTransferFrame.length += length - } - } - } - - def position_update(position:Long) = { - this.position.getAndSet(position) - check_position_sync - } - - @volatile - var last_position_sync:PositionSync = null - def check_position_sync = { - val p = position_sync - if( last_position_sync!=p ) { - if( position.get >= p.position ) { - if( caughtUp.compareAndSet(false, true) ) { - info("Slave has now caught up: "+slave_id) - this.synchronized { - this.held_snapshot = None - } - } - p.countDown - last_position_sync = p - } - } - } - - def isCaughtUp = caughtUp.get() - - def status = SlaveStatus(slave_id, socketAddress.toString, isCaughtUp, position.get()) - } - - @volatile - var position_sync = new PositionSync(0L, 0) - - def wal_sync_to(position:Long):Unit = { - if( minSlaveAcks<1 || (syncToMask & SYNC_TO_REMOTE)==0) { - return - } - - if( isStoppedOrStopping ) { - throw new IllegalStateException("Store replication stopped") - } - - val position_sync = new PositionSync(position, minSlaveAcks) - this.position_sync = position_sync - for( slave <- slaves.values() ) { - slave.check_position_sync - } - - while( !position_sync.await(1, TimeUnit.SECONDS) ) { - if( isStoppedOrStopping ) { - throw new IllegalStateException("Store replication stopped") - } - warn("Store update waiting on %d replica(s) to catch up to log position %d. %s", minSlaveAcks, position, status) - } - } - - - def isStoppedOrStopping: Boolean = { - if( isStopped || isStopping ) - return true - if( broker_service!=null && broker_service.isStopping ) - return true - false - } - - def date = System.currentTimeMillis() - - def replicate_wal(file:File, position:Long, offset:Long, length:Long):Unit = { - if( length > 0 ) { - for( slave <- slaves.values() ) { - slave.replicate(file, position, offset, length) - } - } - } - - def replicate_log_delete(log:Long):Unit = { - val value = new LogDelete - value.log = log - for( slave <- slaves.values() ) { - slave.replicate(value) - } - } - - def wal_append_position = client.wal_append_position - @volatile - var wal_date = 0L -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ProxyLevelDBStore.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ProxyLevelDBStore.scala deleted file mode 100644 index 9822fe21f7..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ProxyLevelDBStore.scala +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.apache.activemq.broker.{LockableServiceSupport, BrokerService, BrokerServiceAware, ConnectionContext} -import org.apache.activemq.command._ -import org.apache.activemq.leveldb.LevelDBStore -import org.apache.activemq.store._ -import org.apache.activemq.usage.SystemUsage -import java.io.File -import java.io.IOException -import java.util.Set -import org.apache.activemq.util.{ServiceStopper, ServiceSupport} -import org.apache.activemq.broker.scheduler.JobSchedulerStore - -/** - */ -abstract class ProxyLevelDBStore extends LockableServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore with PListStore { - - def proxy_target: LevelDBStore - - def beginTransaction(context: ConnectionContext) { - proxy_target.beginTransaction(context) - } - - def getLastProducerSequenceId(id: ProducerId): Long = { - return proxy_target.getLastProducerSequenceId(id) - } - - def createTopicMessageStore(destination: ActiveMQTopic): TopicMessageStore = { - return proxy_target.createTopicMessageStore(destination) - } - - def createJobSchedulerStore():JobSchedulerStore = { - throw new UnsupportedOperationException(); - } - - def setDirectory(dir: File) { - proxy_target.setDirectory(dir) - } - - def checkpoint(sync: Boolean) { - proxy_target.checkpoint(sync) - } - - def createTransactionStore: TransactionStore = { - return proxy_target.createTransactionStore - } - - def setUsageManager(usageManager: SystemUsage) { - proxy_target.setUsageManager(usageManager) - } - - def commitTransaction(context: ConnectionContext) { - proxy_target.commitTransaction(context) - } - - def getLastMessageBrokerSequenceId: Long = { - return proxy_target.getLastMessageBrokerSequenceId - } - - def setBrokerName(brokerName: String) { - proxy_target.setBrokerName(brokerName) - } - - def rollbackTransaction(context: ConnectionContext) { - proxy_target.rollbackTransaction(context) - } - - def removeTopicMessageStore(destination: ActiveMQTopic) { - proxy_target.removeTopicMessageStore(destination) - } - - def getDirectory: File = { - return proxy_target.getDirectory - } - - def size: Long = { - return proxy_target.size - } - - def removeQueueMessageStore(destination: ActiveMQQueue) { - proxy_target.removeQueueMessageStore(destination) - } - - def createQueueMessageStore(destination: ActiveMQQueue): MessageStore = { - return proxy_target.createQueueMessageStore(destination) - } - - def deleteAllMessages { - proxy_target.deleteAllMessages - } - - def getDestinations: Set[ActiveMQDestination] = { - return proxy_target.getDestinations - } - - def rollback(txid: TransactionId) { - proxy_target.rollback(txid) - } - - def recover(listener: TransactionRecoveryListener) { - proxy_target.recover(listener) - } - - def prepare(txid: TransactionId) { - proxy_target.prepare(txid) - } - - def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) { - proxy_target.commit(txid, wasPrepared, preCommit, postCommit) - } - - def getPList(name: String): PList = { - return proxy_target.getPList(name) - } - - def removePList(name: String): Boolean = { - return proxy_target.removePList(name) - } - - def allowIOResumption() = {} -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreTrait.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreTrait.scala deleted file mode 100644 index 
81efbf5576..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicatedLevelDBStoreTrait.scala +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.replicated - -import scala.beans.BeanProperty -import java.util.UUID -import org.apache.activemq.leveldb.LevelDBStore -import org.apache.activemq.leveldb.util.FileSupport._ -import java.io.File - -object ReplicatedLevelDBStoreTrait { - - def create_uuid = UUID.randomUUID().toString - - def node_id(directory:File):String = { - val nodeid_file = directory / "nodeid.txt" - if( nodeid_file.exists() ) { - nodeid_file.readText() - } else { - val rc = create_uuid - nodeid_file.getParentFile.mkdirs() - nodeid_file.writeText(rc) - rc - } - } -} - -/** - */ -trait ReplicatedLevelDBStoreTrait extends LevelDBStore { - - @BeanProperty - var securityToken = "" - - def node_id = ReplicatedLevelDBStoreTrait.node_id(directory) - - def storeId:String = { - val storeid_file = directory / "storeid.txt" - if( storeid_file.exists() ) { - storeid_file.readText() - } else { - null - } - } - - def storeId_=(value:String) { - val storeid_file = directory / "storeid.txt" - storeid_file.writeText(value) - } - - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationProtocolCodec.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationProtocolCodec.scala deleted file mode 100644 index b218f4c90e..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationProtocolCodec.scala +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.fusesource.hawtbuf.{Buffer, AsciiBuffer} -import org.fusesource.hawtdispatch.transport.AbstractProtocolCodec -import org.fusesource.hawtdispatch.transport.AbstractProtocolCodec.Action -import java.nio.{MappedByteBuffer, ByteBuffer} -import org.fusesource.hawtdispatch.Task -import java.io.{OutputStream, File} -import org.fusesource.hawtdispatch.transport.ProtocolCodec.BufferState -import java.util - -class ReplicationFrame(val action:AsciiBuffer, _body:Buffer) { - def body = _body -} -class FileTransferFrame(val file:File, val offset:Long, var length:Long) - -class ReplicationProtocolCodec extends AbstractProtocolCodec { - import ReplicationSupport._ - val transfers = new util.LinkedList[MappedByteBuffer](); - - def encode(value: Any) { - value match { - case value:ReplicationFrame => - value.action.writeTo(nextWriteBuffer.asInstanceOf[OutputStream]) - nextWriteBuffer.write('\n'); - if( value.body!=null ) { - value.body.writeTo(nextWriteBuffer.asInstanceOf[OutputStream]) - } - nextWriteBuffer.write(0); - case value:FileTransferFrame => - if( value.length > 0 ) { - val buffer = map(value.file, value.offset, value.length, true) - writeDirect(buffer); - if( buffer.hasRemaining ) { - transfers.addLast(buffer) - } else { - unmap(buffer) - } - } - case value:Buffer => - value.writeTo(nextWriteBuffer.asInstanceOf[OutputStream]) - } - } - - - override def flush(): BufferState = { - val rc = super.flush() - while( !transfers.isEmpty && !transfers.peekFirst().hasRemaining) { - unmap(transfers.removeFirst()) - } - rc - } - - def initialDecodeAction() = readHeader - - val readHeader = new Action() { - def apply = { - val action_line:Buffer = readUntil('\n'.toByte, 80) - if( action_line!=null ) { - action_line.moveTail(-1); - nextDecodeAction = readReplicationFrame(action_line.ascii()) - nextDecodeAction.apply() - } else { - null - } - } - } - - def readReplicationFrame(action:AsciiBuffer):Action = new Action() { - def apply = { - val data:Buffer = readUntil(0.toByte, 1024*64) - if( data!=null ) { - data.moveTail(-1); - nextDecodeAction = readHeader - new ReplicationFrame(action, data) - } else { - null - } - } - } - - def readData(data_target:ByteBuffer, cb:Task) = { - nextDecodeAction = new Action() { - def apply = { - if( readDirect(data_target) ) { - nextDecodeAction = readHeader - cb.run() - } - null - } - } - } -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationSupport.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationSupport.scala deleted file mode 100644 index 4e41a2e722..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/ReplicationSupport.scala +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.replicated - -import org.fusesource.hawtbuf.Buffer._ -import java.util.concurrent._ -import java.nio.MappedByteBuffer -import sun.nio.ch.DirectBuffer -import java.io.{RandomAccessFile, File} -import java.nio.channels.FileChannel -import java.util.concurrent.atomic.AtomicInteger -import org.fusesource.hawtdispatch._ -import org.apache.activemq.leveldb.util.FileSupport._ -import org.apache.activemq.leveldb.LevelDBClient -import scala.collection.immutable.TreeMap - -object ReplicationSupport { - - val WAL_ACTION = ascii("wal") - val LOGIN_ACTION= ascii("LevelDB Store Replication v1:login") - val SYNC_ACTION = ascii("sync") - val GET_ACTION = ascii("get") - val ACK_ACTION = ascii("ack") - val OK_ACTION = ascii("ok") - val DISCONNECT_ACTION = ascii("disconnect") - val ERROR_ACTION = ascii("error") - val LOG_DELETE_ACTION = ascii("rm") - - def unmap(buffer:MappedByteBuffer ) { - try { - buffer.asInstanceOf[DirectBuffer].cleaner().clean(); - } catch { - case ignore:Throwable => - } - } - - def map(file:File, offset:Long, length:Long, readOnly:Boolean) = { - val raf = new RandomAccessFile(file, if(readOnly) "r" else "rw"); - try { - val mode = if (readOnly) FileChannel.MapMode.READ_ONLY else FileChannel.MapMode.READ_WRITE - raf.getChannel().map(mode, offset, length); - } finally { - raf.close(); - } - } - - def stash(directory:File) { - directory.mkdirs() - val tmp_stash = directory / "stash.tmp" - val stash = directory / "stash" - stash.recursiveDelete - tmp_stash.recursiveDelete - tmp_stash.mkdirs() - copy_store_dir(directory, tmp_stash) - tmp_stash.renameTo(stash) - } - - def copy_store_dir(from:File, to:File) = { - val log_files = LevelDBClient.find_sequence_files(from, LevelDBClient.LOG_SUFFIX) - if( !log_files.isEmpty ) { - val append_file = log_files.last._2 - for( file <- log_files.values ; if file != append_file) { - file.linkTo(to / file.getName) - val crc_file = file.getParentFile / (file.getName+".crc32" ) - if( crc_file.exists() ) { - crc_file.linkTo(to / crc_file.getName) - } - } - append_file.copyTo(to / append_file.getName) - } - - val index_dirs = LevelDBClient.find_sequence_files(from, LevelDBClient.INDEX_SUFFIX) - if( !index_dirs.isEmpty ) { - val index_file = index_dirs.last._2 - var target = to / index_file.getName - target.mkdirs() - LevelDBClient.copyIndex(index_file, target) - } - } - - def stash_clear(directory:File) { - val stash = directory / "stash" - stash.recursiveDelete - } - - def unstash(directory:File) { - val tmp_stash = directory / "stash.tmp" - tmp_stash.recursiveDelete - val stash = directory / "stash" - if( stash.exists() ) { - delete_store(directory) - copy_store_dir(stash, directory) - stash.recursiveDelete - } - } - - def delete_store(directory: File) { - // Delete any existing files to make space for the stash we will be restoring.. 
- var t: TreeMap[Long, File] = LevelDBClient.find_sequence_files(directory, LevelDBClient.LOG_SUFFIX) - for (entry <- t) { - val file = entry._2 - file.delete() - val crc_file = directory / (file.getName+".crc32" ) - if( crc_file.exists() ) { - crc_file.delete() - } - } - for (file <- LevelDBClient.find_sequence_files(directory, LevelDBClient.INDEX_SUFFIX)) { - file._2.recursiveDelete - } - } -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/SlaveLevelDBStore.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/SlaveLevelDBStore.scala deleted file mode 100644 index cbacc77233..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/SlaveLevelDBStore.scala +++ /dev/null @@ -1,461 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.replicated - -import org.apache.activemq.leveldb.{LevelDBStoreTest, LevelDBClient, LevelDBStore} -import org.apache.activemq.util.ServiceStopper -import java.util -import org.fusesource.hawtdispatch._ -import org.apache.activemq.leveldb.replicated.dto._ -import org.fusesource.hawtdispatch.transport._ -import java.net.URI -import org.fusesource.hawtbuf.{Buffer, AsciiBuffer} -import org.apache.activemq.leveldb.util._ - -import FileSupport._ -import java.io.{IOException, RandomAccessFile, File} -import scala.beans.BeanProperty -import java.util.concurrent.{CountDownLatch, TimeUnit} -import javax.management.ObjectName -import org.apache.activemq.broker.jmx.AnnotatedMBean - -object SlaveLevelDBStore extends Log - -/** - */ -class SlaveLevelDBStore extends LevelDBStore with ReplicatedLevelDBStoreTrait { - - import SlaveLevelDBStore._ - import ReplicationSupport._ - import collection.JavaConversions._ - - @BeanProperty - var connect = "tcp://0.0.0.0:61619" - - val queue = createQueue("leveldb replication slave") - var replay_from = 0L - var caughtUp = false - - var wal_session:Session = _ - var transfer_session:Session = _ - - var status = "initialized" - - override def createClient = new LevelDBClient(this) { - // We don't want to start doing index snapshots until - // the slave is caught up. - override def post_log_rotate: Unit = { - if( caughtUp ) { - writeExecutor { - snapshotIndex(false) - } - } - } - - // The snapshots we create are based on what has been replayed.
- override def nextIndexSnapshotPos:Long = indexRecoveryPosition - } - - override def doStart() = { - queue.setLabel("slave: "+node_id) - client.init() - if (purgeOnStatup) { - purgeOnStatup = false - db.client.locked_purge - info("Purged: "+this) - } - db.client.dirtyIndexFile.recursiveDelete - db.client.plistIndexFile.recursiveDelete - start_slave_connections - - if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) { - val name = new ObjectName(objectName.toString + ",view=Test") - AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreTest(this), name) - } - } - - var stopped = false - override def doStop(stopper: ServiceStopper) = { - if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) - brokerService.getManagementContext().unregisterMBean(new ObjectName(objectName.toString+",view=Test")); - - val latch = new CountDownLatch(1) - stop_connections(^{ - latch.countDown - }) - // Make sure the sessions are stopped before we close the client. - latch.await() - client.stop() - } - - - def restart_slave_connections = { - stop_connections(^{ - client.stop() - client = createClient - client.init() - start_slave_connections - }) - } - - def start_slave_connections = { - val transport: TcpTransport = create_transport - - status = "Attaching to master: "+connect - info(status) - wal_session = new Session(transport, (session)=>{ - // lets stash away our current state so that we can unstash it - // in case we don't get caught up.. If the master dies, - // the stashed data might be the best option to become the master. - stash(directory) - delete_store(directory) - debug("Log replication session connected") - session.request_then(SYNC_ACTION, null) { body => - val response = JsonCodec.decode(body, classOf[SyncResponse]) - transfer_missing(response) - session.handler = wal_handler(session) - } - }) - wal_session.start - } - - def create_transport: TcpTransport = { - val transport = new TcpTransport() - transport.setBlockingExecutor(blocking_executor) - transport.setDispatchQueue(queue) - transport.connecting(new URI(connect), null) - transport - } - - def stop_connections(cb:Task) = { - var task = ^{ - unstash(directory) - cb.run() - } - val wal_session_copy = wal_session - if( wal_session_copy !=null ) { - wal_session = null - val next = task - task = ^{ - wal_session_copy.transport.stop(next) - } - } - val transfer_session_copy = transfer_session - if( transfer_session_copy !=null ) { - transfer_session = null - val next = task - task = ^{ - transfer_session_copy.transport.stop(next) - } - } - task.run(); - } - - - var wal_append_position = 0L - var wal_append_offset = 0L - @volatile - var wal_date = 0L - - def send_wal_ack = { - queue.assertExecuting() - if( caughtUp && !stopped && wal_session!=null) { - val ack = new WalAck() - ack.position = wal_append_position -// info("Sending ack: "+wal_append_position) - wal_session.send_replication_frame(ACK_ACTION, ack) - if( replay_from != ack.position ) { - val old_replay_from = replay_from - replay_from = ack.position - client.writeExecutor { - client.replay_from(old_replay_from, ack.position, false) - } - } - } - } - - val pending_log_removes = new util.ArrayList[Long]() - - def wal_handler(session:Session): (AnyRef)=>Unit = (command)=>{ - command match { - case command:ReplicationFrame => - command.action match { - case WAL_ACTION => - val value = JsonCodec.decode(command.body, classOf[LogWrite]) - if( caughtUp && value.offset ==0 && value.file!=0 ) { - client.log.rotate - } - trace("%s, 
Slave WAL update: (file:%s, offset: %d, length: %d)".format(directory, value.file.toHexString, value.offset, value.length)) - val file = client.log.next_log(value.file) - val buffer = map(file, value.offset, value.length, false) - - def readData = session.codec.readData(buffer, ^{ - if( value.sync ) { - buffer.force() - } - - unmap(buffer) - wal_append_offset = value.offset+value.length - wal_append_position = value.file + wal_append_offset - wal_date = value.date - if( !stopped ) { - if( caughtUp ) { - client.log.current_appender.skip(value.length) - } - send_wal_ack - } - }) - - if( client.log.recordLogTestSupport!=null ) { - client.log.recordLogTestSupport.writeCall.call { - readData - } - } else { - readData - } - - case LOG_DELETE_ACTION => - - val value = JsonCodec.decode(command.body, classOf[LogDelete]) - if( !caughtUp ) { - pending_log_removes.add(value.log) - } else { - client.log.delete(value.log) - } - - case OK_ACTION => - // This comes in as response to a disconnect we send. - case _ => session.fail("Unexpected command action: "+command.action) - } - } - } - - class Session(transport:Transport, on_login: (Session)=>Unit) extends TransportHandler(transport) { - - val response_callbacks = new util.LinkedList[(ReplicationFrame)=>Unit]() - - override def onTransportFailure(error: IOException) { - if( isStarted ) { - warn("Unexpected session error: "+error) - queue.after(1, TimeUnit.SECONDS) { - if( isStarted ) { - restart_slave_connections - } - } - } - super.onTransportFailure(error) - } - - override def onTransportConnected { - super.onTransportConnected - val login = new Login - login.security_token = securityToken - login.node_id = node_id - request_then(LOGIN_ACTION, login) { body => - on_login(Session.this) - } - } - - def disconnect(cb:Task) = queue { - send_replication_frame(DISCONNECT_ACTION, null) - transport.flush() - transport.stop(cb) - } - - def fail(msg:String) = { - error(msg) - transport.stop(NOOP) - } - - var handler: (AnyRef)=>Unit = response_handler - def onTransportCommand(command: AnyRef) = handler(command) - - def request_then(action:AsciiBuffer, body:AnyRef)(cb:(Buffer)=>Unit) = { - request(action, body){ response => - response.action match { - case OK_ACTION => - cb(response.body) - case ERROR_ACTION => - fail(action+" failed: "+response.body.ascii().toString) - case _ => - fail("Unexpected response action: "+response.action) - } - } - } - - def request(action:AsciiBuffer, body:AnyRef)(cb:(ReplicationFrame)=>Unit) = { - response_callbacks.addLast(cb) - send_replication_frame(action, body) - } - - def response_handler: (AnyRef)=>Unit = (command)=> { - command match { - case command:ReplicationFrame => - if( response_callbacks.isEmpty ) { - error("No response callback registered") - transport.stop(NOOP) - } else { - val callback = response_callbacks.removeFirst() - callback(command) - } - } - } - } - - def transfer_missing(state:SyncResponse) = { - - val dirty_index = client.dirtyIndexFile - dirty_index.recursiveDelete - - val snapshot_index = client.snapshotIndexFile(state.snapshot_position) - - val transport = new TcpTransport() - transport.setBlockingExecutor(blocking_executor) - transport.setDispatchQueue(queue) - transport.connecting(new URI(connect), null) - - debug("%s: Connecting download session. 
Snapshot index at: %s".format(directory, state.snapshot_position.toHexString)) - transfer_session = new Session(transport, (session)=> { - - var total_files = 0 - var total_size = 0L - var downloaded_size = 0L - var downloaded_files = 0 - - def update_download_status = { - status = "Attaching... Downloaded %.2f/%.2f kb and %d/%d files".format(downloaded_size/1024f, total_size/1024f, downloaded_files, total_files) - info(status) - } - - debug("Download session connected...") - - // Transfer the log files.. - var append_offset = 0L - for( x <- state.log_files ) { - - if( x.file == state.append_log ) { - append_offset = x.length - } - - val stashed_file: File = directory / "stash" / x.file - val target_file: File = directory / x.file - - def previously_downloaded:Boolean = { - if( !stashed_file.exists() ) - return false - - if (stashed_file.length() < x.length ) - return false - - if (stashed_file.length() == x.length ) - return stashed_file.cached_crc32 == x.crc32 - - if( x.file == state.append_log ) { - return false; - } - - return stashed_file.cached_crc32 == x.crc32 - } - - // We don't have to transfer log files that have been previously transferred. - if( previously_downloaded ) { - // lets link it from the stash directory.. - info("Slave skipping download of: log/"+x.file) - if( x.file == state.append_log ) { - stashed_file.copyTo(target_file) // let not link a file that's going to be modified.. - } else { - stashed_file.linkTo(target_file) - } - } else { - val transfer = new Transfer() - transfer.file = "log/"+x.file - transfer.offset = 0 - transfer.length = x.length - debug("Slave requested: "+transfer.file) - total_size += x.length - total_files += 1 - session.request_then(GET_ACTION, transfer) { body => - val buffer = map(target_file, 0, x.length, false) - session.codec.readData(buffer, ^{ - unmap(buffer) - trace("%s, Downloaded %s, offset:%d, length:%d", directory, transfer.file, transfer.offset, transfer.length) - downloaded_size += x.length - downloaded_files += 1 - update_download_status - }) - } - } - } - - // Transfer the index files.. - if( !state.index_files.isEmpty ) { - dirty_index.mkdirs() - } - for( x <- state.index_files ) { - val transfer = new Transfer() - transfer.file = snapshot_index.getName+"/"+x.file - transfer.offset = 0 - transfer.length = x.length - info("Slave requested: "+transfer.file) - total_size += x.length - total_files += 1 - session.request_then(GET_ACTION, transfer) { body => - val buffer = map(dirty_index / x.file, 0, x.length, false) - session.codec.readData(buffer, ^{ - unmap(buffer) - trace("%s, Downloaded %s, offset:%d, length:%d", directory, transfer.file, transfer.offset, transfer.length) - downloaded_size += x.length - downloaded_files += 1 - update_download_status - }) - } - } - - session.request_then(DISCONNECT_ACTION, null) { body => - // Ok we are now caught up. - status = "Attached" - info(status) - stash_clear(directory) // we don't need the stash anymore. 
- transport.stop(NOOP) - transfer_session = null - replay_from = state.snapshot_position - if( wal_append_position < state.wal_append_position ) { - wal_append_position = state.wal_append_position - wal_append_offset = append_offset - } - client.writeExecutor { - if( !state.index_files.isEmpty ) { - trace("%s: Index sync complete, copying to snapshot.", directory) - client.copyDirtyIndexToSnapshot(state.wal_append_position) - } - client.replay_init() - } - caughtUp = true - client.log.open(wal_append_offset) - send_wal_ack - for( i <- pending_log_removes ) { - client.log.delete(i); - } - pending_log_removes.clear() - } - }) - transfer_session.start - state.snapshot_position - } - - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/TransportHandler.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/TransportHandler.scala deleted file mode 100644 index d516703aa2..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/TransportHandler.scala +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.replicated - -import org.fusesource.hawtdispatch.transport.{TransportListener, DefaultTransportListener, Transport} -import java.util -import org.apache.activemq.leveldb.replicated.ReplicationSupport._ -import org.fusesource.hawtdispatch._ -import org.apache.activemq.leveldb.util.JsonCodec -import java.io.IOException -import org.fusesource.hawtbuf.AsciiBuffer - -/** - */ -abstract class TransportHandler(val transport: Transport) extends TransportListener { - - var outbound = new util.LinkedList[(AnyRef, ()=>Unit)]() - val codec = new ReplicationProtocolCodec - - transport.setProtocolCodec(codec) - transport.setTransportListener(this) - - def start = { - transport.start(NOOP) - } - - def onTransportConnected = transport.resumeRead() - def onTransportDisconnected() = {} - def onRefill = drain - def onTransportFailure(error: IOException) = transport.stop(NOOP) - - def drain:Unit = { - while( !outbound.isEmpty ) { - val (value, on_send) = outbound.peekFirst() - if( transport.offer(value) ) { - outbound.removeFirst() - if( on_send!=null ) { - on_send() - } - } else { - return - } - } - } - def send(value:AnyRef):Unit = send(value, null) - def send(value:AnyRef, on_send: ()=>Unit):Unit = { - transport.getDispatchQueue.assertExecuting() - outbound.add((value, on_send)) - drain - } - - def send_replication_frame(action:AsciiBuffer, body:AnyRef):Unit = send(new ReplicationFrame(action, if(body==null) null else JsonCodec.encode(body))) - def sendError(error:String) = send_replication_frame(ERROR_ACTION, error) - def sendOk(body:AnyRef) = send_replication_frame(OK_ACTION, body) - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ChangeListener.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ChangeListener.scala deleted file mode 100644 index d76ade8fa1..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ChangeListener.scala +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.replicated.groups - -import org.slf4j.{Logger, LoggerFactory} -import java.util.concurrent.TimeUnit - - -/** - *- * Callback interface used to get notifications of changes - * to a cluster group. - *
- * - * @author Hiram Chirino - */ -trait ChangeListener { - def changed:Unit - def connected:Unit - def disconnected:Unit -} - -object ChangeListenerSupport { - val LOG: Logger = LoggerFactory.getLogger(classOf[ChangeListenerSupport]) -} -/** - *- *
- * - * @author Hiram Chirino - */ -trait ChangeListenerSupport { - - var listeners = List[ChangeListener]() - - def connected:Boolean - - def add(listener: ChangeListener): Unit = { - val connected = this.synchronized { - listeners ::= listener - this.connected - } - if (connected) { - listener.connected - } - } - - def remove(listener: ChangeListener): Unit = this.synchronized { - listeners = listeners.filterNot(_ == listener) - } - - def fireConnected() = { - val listeners = this.synchronized { this.listeners } - check_elapsed_time { - for (listener <- listeners) { - listener.connected - } - } - } - - def fireDisconnected() = { - val listeners = this.synchronized { this.listeners } - check_elapsed_time { - for (listener <- listeners) { - listener.disconnected - } - } - } - - def fireChanged() = { - val listeners = this.synchronized { this.listeners } - val start = System.nanoTime() - check_elapsed_time { - for (listener <- listeners) { - listener.changed - } - } - } - - def check_elapsed_time[T](func: => T):T = { - val start = System.nanoTime() - try { - func - } finally { - val end = System.nanoTime() - val elapsed = TimeUnit.NANOSECONDS.toMillis(end-start) - if( elapsed > 100 ) { - ChangeListenerSupport.LOG.warn("listeners are taking too long to process the events") - } - } - } - -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ClusteredSingleton.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ClusteredSingleton.scala deleted file mode 100644 index a66d7b3153..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ClusteredSingleton.scala +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.replicated.groups - - -import collection.mutable.{ListBuffer, HashMap} - -import java.io._ -import com.fasterxml.jackson.databind.ObjectMapper -import collection.JavaConversions._ -import java.util.LinkedHashMap -import java.lang.{IllegalStateException, String} -import beans.BeanProperty -import com.fasterxml.jackson.annotation.JsonProperty -import org.apache.zookeeper.KeeperException.NoNodeException -import scala.reflect.ClassTag - -/** - * @author Hiram Chirino - */ -trait NodeState { - - /** - * The id of the cluster node. There can be multiple nodes with this ID, - but only the first node in the cluster will be the master for it.
- */ - def id: String - - override - def toString = new String(ClusteredSupport.encode(this), "UTF-8") -} - -class TextNodeState extends NodeState { - @BeanProperty - @JsonProperty - var id:String = _ -} - -/** - * - * - * @author Hiram Chirino - */ -object ClusteredSupport { - - val DEFAULT_MAPPER = new ObjectMapper - - def decode[T](t : Class[T], buffer: Array[Byte], mapper: ObjectMapper=DEFAULT_MAPPER): T = decode(t, new ByteArrayInputStream(buffer), mapper) - def decode[T](t : Class[T], in: InputStream, mapper: ObjectMapper): T = mapper.readValue(in, t) - - def encode(value: AnyRef, mapper: ObjectMapper=DEFAULT_MAPPER): Array[Byte] = { - var baos: ByteArrayOutputStream = new ByteArrayOutputStream - encode(value, baos, mapper) - return baos.toByteArray - } - - def encode(value: AnyRef, out: OutputStream, mapper: ObjectMapper): Unit = { - mapper.writeValue(out, value) - } - -} - -/** - *- *
- * - * @author Hiram Chirino - */ -class ClusteredSingletonWatcher[T <: NodeState](val stateClass:Class[T]) extends ChangeListenerSupport { - import ClusteredSupport._ - - protected var _group:ZooKeeperGroup = _ - def group = _group - - /** - * Override to use a custom configured mapper. - */ - def mapper = ClusteredSupport.DEFAULT_MAPPER - - private val listener = new ChangeListener() { - def changed() { - val members = _group.members - val t = new LinkedHashMap[String, T]() - members.foreach { - case (path, data) => - try { - val value = decode(stateClass, data, mapper) - t.put(path, value) - } catch { - case e: Throwable => - e.printStackTrace() - } - } - changed_decoded(t) - } - - def connected = { - onConnected - changed - ClusteredSingletonWatcher.this.fireConnected - } - - def disconnected = { - onDisconnected - changed - ClusteredSingletonWatcher.this.fireDisconnected - } - } - - protected def onConnected = {} - protected def onDisconnected = {} - - def start(group:ZooKeeperGroup) = this.synchronized { - if(_group !=null ) - throw new IllegalStateException("Already started.") - _group = group - _group.add(listener) - } - - def stop = this.synchronized { - if(_group==null) - throw new IllegalStateException("Not started.") - _group.remove(listener) - _members = HashMap[String, ListBuffer[(String, T)]]() - _group = null - } - - def connected = this.synchronized { - if(_group==null) { - false - } else { - _group.connected - } - } - - protected var _members = HashMap[String, ListBuffer[(String, T)]]() - def members = this.synchronized { _members } - - def changed_decoded(m: LinkedHashMap[String, T]) = { - this.synchronized { - if( _group!=null ) { - _members = HashMap[String, ListBuffer[(String, T)]]() - m.foreach { case node => - _members.getOrElseUpdate(node._2.id, ListBuffer[(String, T)]()).append(node) - } - } - } - fireChanged - } - - def masters = this.synchronized { - _members.mapValues(_.head._2).toArray.map(_._2).toArray(new ClassTag[T] { - def runtimeClass = stateClass - override def erasure = stateClass - }) - } - -} -/** - *- *
- * - * @author Hiram Chirino - */ -class ClusteredSingleton[T <: NodeState ](stateClass:Class[T]) extends ClusteredSingletonWatcher[T](stateClass) { - import ClusteredSupport._ - - private var _eid:String = _ - /** the ephemeral id of the node is unique within the group */ - def eid = _eid - - private var _state:T = _ - - override def stop = { - this.synchronized { - if(_state != null) { - leave - } - super.stop - } - } - - def join(state:T):Unit = this.synchronized { - if(state==null) - throw new IllegalArgumentException("State cannot be null") - if(state.id==null) - throw new IllegalArgumentException("The state id cannot be null") - if(_group==null) - throw new IllegalStateException("Not started.") - this._state = state - - while( connected ) { - if( _eid == null ) { - _eid = group.join(encode(state, mapper)) - return; - } else { - try { - _group.update(_eid, encode(state, mapper)) - return; - } catch { - case e:NoNodeException => - this._eid = null; - } - } - } - } - - def leave:Unit = this.synchronized { - if(this._state==null) - throw new IllegalStateException("Not joined") - if(_group==null) - throw new IllegalStateException("Not started.") - - this._state = null.asInstanceOf[T] - if( _eid!=null && connected ) { - _group.leave(_eid) - _eid = null - } - } - - override protected def onDisconnected { - } - - override protected def onConnected { - if( this._state!=null ) { - join(this._state) - } - } - - def isMaster:Boolean = this.synchronized { - if(this._state==null) - return false; - _members.get(this._state.id) match { - case Some(nodes) => - nodes.headOption.map { x=> - x._1 == _eid - }.getOrElse(false) - case None => false - } - } - - def master = this.synchronized { - if(this._state==null) - throw new IllegalStateException("Not joined") - _members.get(this._state.id).map(_.head._2) - } - - def slaves = this.synchronized { - if(this._state==null) - throw new IllegalStateException("Not joined") - val rc = _members.get(this._state.id).map(_.toList).getOrElse(List()) - rc.drop(1).map(_._2) - } - -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ZooKeeperGroup.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ZooKeeperGroup.scala deleted file mode 100644 index 99808ed0a2..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/replicated/groups/ZooKeeperGroup.scala +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.activemq.leveldb.replicated.groups - -import org.apache.zookeeper._ -import org.linkedin.zookeeper.tracker._ -import scala.collection.mutable.HashMap -import org.linkedin.zookeeper.client.LifecycleListener -import collection.JavaConversions._ -import java.util.{LinkedHashMap, Collection} -import org.apache.zookeeper.KeeperException.{ConnectionLossException, NoNodeException} -import scala.Predef._ -import scala.Some - - -/** - *- *
- * - * @author Hiram Chirino - */ -object ZooKeeperGroupFactory { - - def create(zk: ZKClient, path: String):ZooKeeperGroup = new ZooKeeperGroup(zk, path) - def members(zk: ZKClient, path: String):LinkedHashMap[String, Array[Byte]] = ZooKeeperGroup.members(zk, path) -} - - -/** - * - * @author Hiram Chirino - */ -object ZooKeeperGroup { - def members(zk: ZKClient, path: String):LinkedHashMap[String, Array[Byte]] = { - var rc = new LinkedHashMap[String, Array[Byte]] - zk.getAllChildren(path).sortWith((a,b)=> a < b).foreach { node => - try { - if( node.matches("""0\d+""") ) { - rc.put(node, zk.getData(path+"/"+node)) - } else { - None - } - } catch { - case e:Throwable => - e.printStackTrace - } - } - rc - - } - - -} - -/** - * - * @author Hiram Chirino - */ -class ZooKeeperGroup(val zk: ZKClient, val root: String) extends LifecycleListener with ChangeListenerSupport { - - var tree = new ZooKeeperTreeTracker[Array[Byte]](zk, new ZKByteArrayDataReader, root, 1) - var rebuildTree = false - - val joins = HashMap[String, Int]() - - var members = new LinkedHashMap[String, Array[Byte]] - - private def member_path_prefix = root + "/0" - - zk.registerListener(this) - - create(root) - var treeEventHandler = new NodeEventsListener[Array[Byte]]() { - def onEvents(events: Collection[NodeEvent[Array[Byte]]]): Unit = { - if( !closed ) - fire_cluster_change; - } - } - tree.track(treeEventHandler) - fire_cluster_change - - @volatile - var closed = false - - def close = this.synchronized { - closed = true - joins.foreach { case (path, version) => - try { - if( zk.isConnected ) { - zk.delete(member_path_prefix + path, version) - } - } catch { - case x:NoNodeException => // Already deleted. - } - } - joins.clear - tree.destroy - zk.removeListener(this) - } - - def connected = zk.isConnected - def onConnected() = { - this.synchronized { - // underlying ZooKeeperTreeTracker isn't rebuilding itself after - // the loss of the session, so we need to destroy/rebuild it on - // reconnect. - if (rebuildTree) { - tree.destroy - tree = new ZooKeeperTreeTracker[Array[Byte]](zk, new ZKByteArrayDataReader, root, 1) - tree.track(treeEventHandler) - } else { - rebuildTree = true - } - } - fireConnected() - } - def onDisconnected() = { - this.members = new LinkedHashMap() - fireDisconnected() - } - - def join(data:Array[Byte]=null): String = this.synchronized { - val id = zk.createWithParents(member_path_prefix, data, CreateMode.EPHEMERAL_SEQUENTIAL).stripPrefix(member_path_prefix) - joins.put(id, 0) - id - } - - def update(path:String, data:Array[Byte]=null): Unit = this.synchronized { - joins.get(path) match { - case Some(ver) => - try { - val stat = zk.setData(member_path_prefix + path, data, ver) - joins.put(path, stat.getVersion) - } - catch { - case e:NoNodeException => - joins.remove(path) - throw e; - } - case None => throw new NoNodeException("Has not joined locally: "+path) - } - } - - def leave(path:String): Unit = this.synchronized { - joins.remove(path).foreach { - case version => - try { - zk.delete(member_path_prefix + path, version) - } catch { - case x: NoNodeException => // Already deleted. - case x: ConnectionLossException => // disconnected - } - } - } - - private def fire_cluster_change: Unit = { - this.synchronized { - val t = tree.getTree.toList.filterNot { x => - // don't include the root node, or nodes that don't match our naming convention. 
- (x._1 == root) || !x._1.stripPrefix(root).matches("""/0\d+""") - } - - this.members = new LinkedHashMap() - t.sortWith((a,b)=> a._1 < b._1 ).foreach { x=> - this.members.put(x._1.stripPrefix(member_path_prefix), x._2.getData) - } - } - fireChanged() - } - - private def create(path: String, count : java.lang.Integer = 0): Unit = { - try { - if (zk.exists(path, false) != null) { - return - } - try { - // try create given path in persistent mode - zk.createOrSetWithParents(path, "", CreateMode.PERSISTENT) - } catch { - case ignore: KeeperException.NodeExistsException => - } - } catch { - case ignore : KeeperException.SessionExpiredException => { - if (count > 20) { - // we tried enought number of times - throw new IllegalStateException("Cannot create path " + path, ignore) - } - // try to create path with increased counter value - create(path, count + 1) - } - } - } -} diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala deleted file mode 100644 index b004c8c994..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala +++ /dev/null @@ -1,323 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.util - -import java.io._ -import org.fusesource.hawtdispatch._ -import org.apache.activemq.leveldb.LevelDBClient -import org.fusesource.leveldbjni.internal.Util -import org.apache.activemq.leveldb.util.ProcessSupport._ -import java.util.zip.CRC32 - -object FileSupport { - - implicit def toRichFile(file:File):RichFile = new RichFile(file) - - val onWindows = System.getProperty("os.name").toLowerCase().startsWith("windows") - private var linkStrategy = 0 - private val LOG = Log(getClass) - - def link(source:File, target:File):Unit = { - linkStrategy match { - case 0 => - // We first try to link via a native system call. Fails if - // we cannot load the JNI module. - try { - Util.link(source, target) - } catch { - case e:IOException => throw e - case e:Throwable => - // Fallback.. to a slower impl.. - LOG.debug("Native link system call not available") - linkStrategy = 5 - link(source, target) - } - - // TODO: consider implementing a case which does the native system call using JNA - - case 5 => - // Next we try to do the link by executing an - // operating system shell command - try { - if( onWindows ) { - system("fsutil", "hardlink", "create", target.getCanonicalPath, source.getCanonicalPath) match { - case(0, _, _) => // Success - case (_, out, err) => - // TODO: we might want to look at the out/err to see why it failed - // to avoid falling back to the slower strategy. 
- LOG.debug("fsutil OS command not available either") - linkStrategy = 10 - link(source, target) - } - } else { - system("ln", source.getCanonicalPath, target.getCanonicalPath) match { - case(0, _, _) => // Success - case (_, out, err) => None - // TODO: we might want to look at the out/err to see why it failed - // to avoid falling back to the slower strategy. - LOG.debug("ln OS command not available either") - linkStrategy = 2 - link(source, target) - } - } - } catch { - case e:Throwable => - } - case _ => - // this final strategy is slow but sure to work. - source.copyTo(target) - } - } - - def systemDir(name:String) = { - val baseValue = System.getProperty(name) - if( baseValue==null ) { - sys.error("The the %s system property is not set.".format(name)) - } - val file = new File(baseValue) - if( !file.isDirectory ) { - sys.error("The the %s system property is not set to valid directory path %s".format(name, baseValue)) - } - file - } - - case class RichFile(self:File) { - - def / (path:String) = new File(self, path) - - def linkTo(target:File) = link(self, target) - - def copyTo(target:File) = { - using(new FileOutputStream(target)){ os=> - using(new FileInputStream(self)){ is=> - FileSupport.copy(is, os) - } - } - } - - def crc32(limit:Long=Long.MaxValue) = { - val checksum = new CRC32(); - var remaining = limit; - using(new FileInputStream(self)) { in => - val data = new Array[Byte](1024*4) - var count = in.read(data, 0, remaining.min(data.length).toInt) - while( count > 0 ) { - remaining -= count - checksum.update(data, 0, count); - count = in.read(data, 0, remaining.min(data.length).toInt) - } - } - checksum.getValue() - } - - def cached_crc32 = { - val crc32_file = new File(self.getParentFile, self.getName+".crc32") - if( crc32_file.exists() && crc32_file.lastModified() > self.lastModified() ) { - crc32_file.readText().trim.toLong - } else { - val rc = crc32() - crc32_file.writeText(rc.toString) - rc - } - } - - def list_files:Array[File] = { - Option(self.listFiles()).getOrElse(Array()) - } - - def recursiveList:List[File] = { - if( self.isDirectory ) { - self :: self.listFiles.toList.flatten( _.recursiveList ) - } else { - self :: Nil - } - } - - def recursiveDelete: Unit = { - if( self.exists ) { - if( self.isDirectory ) { - self.listFiles.foreach(_.recursiveDelete) - } - self.delete - } - } - - def recursiveCopyTo(target: File) : Unit = { - if (self.isDirectory) { - target.mkdirs - self.listFiles.foreach( file=> file.recursiveCopyTo( target / file.getName) ) - } else { - self.copyTo(target) - } - } - - def readText(charset:String="UTF-8"): String = { - using(new FileInputStream(self)) { in => - FileSupport.readText(in, charset) - } - } - - def readBytes: Array[Byte] = { - using(new FileInputStream(self)) { in => - FileSupport.readBytes(in) - } - } - - def writeBytes(data:Array[Byte]):Unit = { - using(new FileOutputStream(self)) { out => - FileSupport.writeBytes(out, data) - } - } - - def writeText(data:String, charset:String="UTF-8"):Unit = { - using(new FileOutputStream(self)) { out => - FileSupport.writeText(out, data, charset) - } - } - - } - - /** - * Returns the number of bytes copied. 
- */ - def copy(in: InputStream, out: OutputStream): Long = { - var bytesCopied: Long = 0 - val buffer = new Array[Byte](8192) - var bytes = in.read(buffer) - while (bytes >= 0) { - out.write(buffer, 0, bytes) - bytesCopied += bytes - bytes = in.read(buffer) - } - bytesCopied - } - - def using[R,C <: Closeable](closable: C)(proc: C=>R) = { - try { - proc(closable) - } finally { - try { closable.close } catch { case ignore:Throwable => } - } - } - - def readText(in: InputStream, charset:String="UTF-8"): String = { - new String(readBytes(in), charset) - } - - def readBytes(in: InputStream): Array[Byte] = { - val out = new ByteArrayOutputStream() - copy(in, out) - out.toByteArray - } - - def writeText(out: OutputStream, value: String, charset:String="UTF-8"): Unit = { - writeBytes(out, value.getBytes(charset)) - } - - def writeBytes(out: OutputStream, data: Array[Byte]): Unit = { - copy(new ByteArrayInputStream(data), out) - } - -} - -object ProcessSupport { - import FileSupport._ - - implicit def toRichProcessBuilder(self:ProcessBuilder):RichProcessBuilder = new RichProcessBuilder(self) - - case class RichProcessBuilder(self:ProcessBuilder) { - - def start(out:OutputStream=null, err:OutputStream=null, in:InputStream=null) = { - self.redirectErrorStream(out == err) - val process = self.start - if( in!=null ) { - LevelDBClient.THREAD_POOL { - try { - using(process.getOutputStream) { out => - FileSupport.copy(in, out) - } - } catch { - case _ : Throwable => - } - } - } else { - process.getOutputStream.close - } - - if( out!=null ) { - LevelDBClient.THREAD_POOL { - try { - using(process.getInputStream) { in => - FileSupport.copy(in, out) - } - } catch { - case _ : Throwable => - } - } - } else { - process.getInputStream.close - } - - if( err!=null && err!=out ) { - LevelDBClient.THREAD_POOL { - try { - using(process.getErrorStream) { in => - FileSupport.copy(in, err) - } - } catch { - case _ : Throwable => - } - } - } else { - process.getErrorStream.close - } - process - } - - } - - implicit def toRichProcess(self:Process):RichProcess = new RichProcess(self) - - case class RichProcess(self:Process) { - def onExit(func: (Int)=>Unit) = LevelDBClient.THREAD_POOL { - self.waitFor - func(self.exitValue) - } - } - - implicit def toProcessBuilder(args:Seq[String]):ProcessBuilder = new ProcessBuilder().command(args : _*) - - def launch(command:String*)(func: (Int, Array[Byte], Array[Byte])=>Unit ):Unit = launch(command)(func) - def launch(p:ProcessBuilder, in:InputStream=null)(func: (Int, Array[Byte], Array[Byte]) => Unit):Unit = { - val out = new ByteArrayOutputStream - val err = new ByteArrayOutputStream - p.start(out, err, in).onExit { code=> - func(code, out.toByteArray, err.toByteArray) - } - } - - def system(command:String*):(Int, Array[Byte], Array[Byte]) = system(command) - def system(p:ProcessBuilder, in:InputStream=null):(Int, Array[Byte], Array[Byte]) = { - val out = new ByteArrayOutputStream - val err = new ByteArrayOutputStream - val process = p.start(out, err, in) - process.waitFor - (process.exitValue, out.toByteArray, err.toByteArray) - } - -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/JsonCodec.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/JsonCodec.scala deleted file mode 100644 index 11b209b2ee..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/JsonCodec.scala +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) 
under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.util - -import com.fasterxml.jackson.databind.ObjectMapper -import org.fusesource.hawtbuf.{ByteArrayOutputStream, Buffer} -import java.io.InputStream - -/** - * - * - * @author Hiram Chirino - */ -object JsonCodec { - - final val mapper: ObjectMapper = new ObjectMapper - - def decode[T](buffer: Buffer, clazz: Class[T]): T = { - val original = Thread.currentThread.getContextClassLoader - Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader) - try { - return mapper.readValue(buffer.in, clazz) - } finally { - Thread.currentThread.setContextClassLoader(original) - } - } - - def decode[T](is: InputStream, clazz : Class[T]): T = { - var original: ClassLoader = Thread.currentThread.getContextClassLoader - Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader) - try { - return JsonCodec.mapper.readValue(is, clazz) - } - finally { - Thread.currentThread.setContextClassLoader(original) - } - } - - - def encode(value: AnyRef): Buffer = { - var baos = new ByteArrayOutputStream - mapper.writeValue(baos, value) - return baos.toBuffer - } - -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/Log.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/Log.scala deleted file mode 100644 index cc0d2d3e9b..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/Log.scala +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
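The FileSupport, ProcessSupport and JsonCodec helpers removed above were small plumbing shared by the store code: stream copying and closing, forking child processes onto the store's thread pool, and Jackson round-trips through hawtbuf Buffers. A minimal sketch of how they compose; the StatusDTO bean and the arbitrary `hostname` command are illustrative placeholders, not part of this change:

```scala
// Sketch only: composing the removed util helpers.
import java.io.ByteArrayInputStream
import scala.beans.BeanProperty
import org.apache.activemq.leveldb.util.{FileSupport, JsonCodec, ProcessSupport}

// Hypothetical Jackson bean (not in the patch); @BeanProperty gives it getId/setId.
class StatusDTO { @BeanProperty var id: String = _ }

object UtilSketch {
  def main(args: Array[String]): Unit = {
    // ProcessSupport.system runs a command and blocks for (exitCode, stdout, stderr)
    val (code, out, err) = ProcessSupport.system("hostname")
    println("exit=" + code + ", stderr bytes=" + err.length)

    // FileSupport.readText drains an InputStream into a UTF-8 string
    val host = FileSupport.readText(new ByteArrayInputStream(out)).trim

    // FileSupport.using closes the resource even if the body throws
    FileSupport.using(new ByteArrayInputStream(out)) { in =>
      FileSupport.copy(in, System.out)
    }

    // JsonCodec round-trips a bean through a hawtbuf Buffer
    val dto = new StatusDTO
    dto.setId(host)
    val buffer = JsonCodec.encode(dto)
    val back = JsonCodec.decode(buffer, classOf[StatusDTO])
    println(back.getId)
  }
}
```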
- */ - -package org.apache.activemq.leveldb.util - -import java.util.concurrent.atomic.AtomicLong -import org.slf4j.{MDC, Logger, LoggerFactory} -import java.lang.{Throwable, String} - -/** - * @author Hiram Chirino - */ -object Log { - - def apply(clazz:Class[_]):Log = apply(clazz.getName.stripSuffix("$")) - - def apply(name:String):Log = new Log { - override val log = LoggerFactory.getLogger(name) - } - - def apply(value:Logger):Log = new Log { - override val log = value - } -} - -/** - * @author Hiram Chirino - */ -trait Log { - import Log._ - val log = LoggerFactory.getLogger(getClass.getName.stripSuffix("$")) - - private def format(message:String, args:Seq[Any]) = { - if( args.isEmpty ) { - message - } else { - message.format(args.map(_.asInstanceOf[AnyRef]) : _*) - } - } - - def error(m: => String, args:Any*): Unit = { - if( log.isErrorEnabled ) { - log.error(format(m, args.toSeq)) - } - } - - def error(e: Throwable, m: => String, args:Any*): Unit = { - if( log.isErrorEnabled ) { - log.error(format(m, args.toSeq), e) - } - } - - def error(e: Throwable): Unit = { - if( log.isErrorEnabled ) { - log.error(e.getMessage, e) - } - } - - def warn(m: => String, args:Any*): Unit = { - if( log.isWarnEnabled ) { - log.warn(format(m, args.toSeq)) - } - } - - def warn(e: Throwable, m: => String, args:Any*): Unit = { - if( log.isWarnEnabled ) { - log.warn(format(m, args.toSeq), e) - } - } - - def warn(e: Throwable): Unit = { - if( log.isWarnEnabled ) { - log.warn(e.toString, e) - } - } - - def info(m: => String, args:Any*): Unit = { - if( log.isInfoEnabled ) { - log.info(format(m, args.toSeq)) - } - } - - def info(e: Throwable, m: => String, args:Any*): Unit = { - if( log.isInfoEnabled ) { - log.info(format(m, args.toSeq), e) - } - } - - def info(e: Throwable): Unit = { - if( log.isInfoEnabled ) { - log.info(e.toString, e) - } - } - - - def debug(m: => String, args:Any*): Unit = { - if( log.isDebugEnabled ) { - log.debug(format(m, args.toSeq)) - } - } - - def debug(e: Throwable, m: => String, args:Any*): Unit = { - if( log.isDebugEnabled ) { - log.debug(format(m, args.toSeq), e) - } - } - - def debug(e: Throwable): Unit = { - if( log.isDebugEnabled ) { - log.debug(e.toString, e) - } - } - - def trace(m: => String, args:Any*): Unit = { - if( log.isTraceEnabled ) { - log.trace(format(m, args.toSeq)) - } - } - - def trace(e: Throwable, m: => String, args:Any*): Unit = { - if( log.isTraceEnabled ) { - log.trace(format(m, args.toSeq), e) - } - } - - def trace(e: Throwable): Unit = { - if( log.isTraceEnabled ) { - log.trace(e.toString, e) - } - } - -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala deleted file mode 100644 index e6a5a6f034..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
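The Log helper removed above wraps an slf4j logger with lazily evaluated, printf-style formatted messages. The store classes mixed it into a companion object and imported its members (LevelDBFastEnqueueTest later in this patch does exactly that). A minimal usage sketch with a made-up StoreOpener class:

```scala
// Sketch only: typical usage pattern of the removed Log helper.
import org.apache.activemq.leveldb.util.Log

object StoreOpener extends Log          // slf4j logger named after this class

class StoreOpener {
  import StoreOpener._                  // brings info/warn/error/debug/trace into scope

  def open(path: String): Unit = {
    info("opening store at %s", path)   // message only formatted if INFO is enabled
    try {
      if (!new java.io.File(path).exists()) {
        throw new java.io.IOException("missing directory: " + path)
      }
    } catch {
      case e: Throwable => warn(e, "could not open %s", path)
    }
  }
}
```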
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.util - -/** - *- *
- * - * @author Hiram Chirino - */ -class LongCounter(private var value:Long = 0) extends Serializable { - - def clear() = value=0 - def get() = value - def set(value:Long) = this.value = value - - def incrementAndGet() = addAndGet(1) - def decrementAndGet() = addAndGet(-1) - def addAndGet(amount:Long) = { - value+=amount - value - } - - def getAndIncrement() = getAndAdd(1) - def getAndDecrement() = getAndAdd(-1) - def getAndAdd(amount:Long) = { - val rc = value - value+=amount - rc - } - - override def toString() = get().toString -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala b/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala deleted file mode 100644 index 5d4916299a..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.util - -case class TimeMetric() { - var max = 0L - - def add(duration:Long) = this.synchronized { - max = max.max(duration) - } - - def get = { - this.synchronized { - max - } / 1000000.0 - } - def reset = { - this.synchronized { - val rc = max - max = 0 - rc - } / 1000000.0 - } - - def apply[T](func: =>T):T = { - val start = System.nanoTime() - try { - func - } finally { - add(System.nanoTime() - start) - } - } - -} - diff --git a/activemq-leveldb-store/src/main/scala/org/apache/activemq/store/leveldb/package.html b/activemq-leveldb-store/src/main/scala/org/apache/activemq/store/leveldb/package.html deleted file mode 100755 index 8eb826d45d..0000000000 --- a/activemq-leveldb-store/src/main/scala/org/apache/activemq/store/leveldb/package.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - -- Stub for the LevelDB store implementation from https://github.com/fusesource/fuse-extra/tree/master/fusemq-leveldb -
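LongCounter and TimeMetric, removed above, are deliberately tiny: the counter is unsynchronized (its owning code guards it), and the metric records a maximum duration in nanoseconds while reporting milliseconds. A usage sketch with hypothetical metric names:

```scala
// Sketch only: how the store-side code typically used these helpers.
import org.apache.activemq.leveldb.util.{LongCounter, TimeMetric}

object MetricSketch {
  val uowClosed = new LongCounter()        // not thread-safe; guard with the owning lock
  val indexWriteLatency = TimeMetric()     // keeps the worst duration seen, in nanos

  def writeBatch(): Unit = {
    // apply() times the block with System.nanoTime and records the max
    indexWriteLatency {
      Thread.sleep(5)                      // stand-in for the real index write
    }
    uowClosed.incrementAndGet()
  }

  def main(args: Array[String]): Unit = {
    writeBatch()
    println("uow closed: " + uowClosed.get())
    // get/reset convert the recorded max from nanoseconds to milliseconds
    println("max index write latency (ms): " + indexWriteLatency.reset)
  }
}
```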
- - - diff --git a/activemq-leveldb-store/src/test/java/org/apache/activemq/leveldb/test/ElectingLevelDBStoreTest.java b/activemq-leveldb-store/src/test/java/org/apache/activemq/leveldb/test/ElectingLevelDBStoreTest.java deleted file mode 100644 index 95376d65b3..0000000000 --- a/activemq-leveldb-store/src/test/java/org/apache/activemq/leveldb/test/ElectingLevelDBStoreTest.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test; - -import org.apache.activemq.Service; -import org.apache.activemq.command.ActiveMQQueue; -import org.apache.activemq.leveldb.CountDownFuture; -import org.apache.activemq.leveldb.LevelDBStore; -import org.apache.activemq.leveldb.replicated.ElectingLevelDBStore; -import org.apache.activemq.store.MessageStore; -import org.apache.commons.io.FileUtils; -import org.junit.After; -import org.junit.Ignore; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.concurrent.TimeUnit; -import static org.apache.activemq.leveldb.test.ReplicationTestSupport.*; -import static org.junit.Assert.*; - -/** - */ -public class ElectingLevelDBStoreTest extends ZooKeeperTestSupport { - - protected static final Logger LOG = LoggerFactory.getLogger(ElectingLevelDBStoreTest.class); - ArrayList- *
- * - * @author Hiram Chirino - */ -class DFSLevelDBStore extends LevelDBStore { - - @BeanProperty - var dfsUrl:String = _ - @BeanProperty - var dfsConfig:String = _ - @BeanProperty - var dfsDirectory:String = _ - @BeanProperty - var dfsBlockSize = 1024*1024*50L - @BeanProperty - var dfsReplication = 1 - @BeanProperty - var containerId:String = _ - - var dfs:FileSystem = _ - - override def doStart = { - if(dfs==null) { - Thread.currentThread().setContextClassLoader(getClass.getClassLoader) - val config = new Configuration() - config.set("fs.hdfs.impl.disable.cache", "true") - config.set("fs.file.impl.disable.cache", "true") - Option(dfsConfig).foreach(config.addResource(_)) - Option(dfsUrl).foreach(config.set("fs.default.name", _)) - dfsUrl = config.get("fs.default.name") - dfs = FileSystem.get(config) - } - if ( containerId==null ) { - containerId = InetAddress.getLocalHost.getHostName - } - super.doStart - } - - override def doStop(stopper: ServiceStopper): Unit = { - super.doStop(stopper) - if(dfs!=null){ - dfs.close() - } - } - - override def createClient = new DFSLevelDBClient(this) -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/dfs/IndexManifestDTO.java b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/dfs/IndexManifestDTO.java deleted file mode 100644 index e6c324a7d6..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/dfs/IndexManifestDTO.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.activemq.leveldb.dfs; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlAttribute; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.HashSet; -import java.util.Set; - -/** - * @author Hiram Chirino - */ -@XmlRootElement(name="index_files") -@XmlAccessorType(XmlAccessType.FIELD) -public class IndexManifestDTO { - - @XmlAttribute(name = "snapshot_id") - public long snapshot_id; - - @XmlAttribute(name = "current_manifest") - public String current_manifest; - - @XmlAttribute(name = "file") - public Set- * ActiveMQ implementation of the JMS Scenario class. - *
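The DFS-backed store removed above layers HDFS storage on top of the local LevelDB store and exposes its Hadoop settings as bean properties so they can be set from broker configuration. A minimal programmatic sketch; the hdfs://namenode:8020 URL and the directory paths are placeholders, not values from this patch:

```scala
// Sketch only: wiring the (now removed) DFS-backed store into a broker.
import java.io.File
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.leveldb.dfs.DFSLevelDBStore

object DfsStoreSketch {
  def main(args: Array[String]): Unit = {
    val store = new DFSLevelDBStore
    store.setDirectory(new File("target/activemq-data/leveldb"))   // local working copy
    store.setDfsUrl("hdfs://namenode:8020")                        // placeholder namenode
    store.setDfsDirectory("/activemq/leveldb")                     // where data lands in HDFS
    store.setDfsReplication(3)

    val broker = new BrokerService
    broker.setPersistenceAdapter(store)
    broker.addConnector("tcp://0.0.0.0:61616")
    broker.start()
    broker.waitUntilStarted()
  }
}
```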
- * - * @author Hiram Chirino - */ -class ActiveMQScenario extends JMSClientScenario { - - override protected def factory:ConnectionFactory = { - val rc = new ActiveMQConnectionFactory - rc.setBrokerURL(url) - rc - } - - override protected def destination(i:Int):Destination = destination_type match { - case "queue" => new ActiveMQQueue(indexed_destination_name(i)) - case "topic" => new ActiveMQTopic(indexed_destination_name(i)) - case _ => sys.error("Unsuported destination type: "+destination_type) - } - -} diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBFastEnqueueTest.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBFastEnqueueTest.scala deleted file mode 100644 index 3de3dec2b5..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBFastEnqueueTest.scala +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import org.apache.hadoop.fs.FileUtil -import java.io.File -import java.util.concurrent.TimeUnit -import org.apache.activemq.leveldb.{LevelDBStore} -import org.apache.activemq.leveldb.dfs.DFSLevelDBStore - -/** - *- *
- * - * @author Hiram Chirino - */ -class DFSLevelDBFastEnqueueTest extends LevelDBFastEnqueueTest { - - override def setUp: Unit = { - TestingHDFSServer.start - super.setUp - } - - override def tearDown: Unit = { - super.tearDown - TestingHDFSServer.stop - } - - override protected def createStore: LevelDBStore = { - var store: DFSLevelDBStore = new DFSLevelDBStore - store.setDirectory(dataDirectory) - store.setDfsDirectory("target/activemq-data/hdfs-leveldb") - return store - } - - private def dataDirectory: File = { - return new File("target/activemq-data/leveldb") - } - - /** - * On restart we will also delete the local file system store, so that we test restoring from - * HDFS. - */ - override protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = { - stopBroker - FileUtil.fullyDelete(dataDirectory) - TimeUnit.MILLISECONDS.sleep(restartDelay) - startBroker(false, checkpoint) - } -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBStoreTest.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBStoreTest.scala deleted file mode 100644 index 04e329f9a1..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/DFSLevelDBStoreTest.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import org.apache.activemq.store.PersistenceAdapter -import java.io.File -import org.apache.activemq.leveldb.dfs.DFSLevelDBStore - -/** - *- *
- * - * @author Hiram Chirino - */ -class DFSLevelDBStoreTest extends LevelDBStoreTest { - override protected def setUp: Unit = { - TestingHDFSServer.start - super.setUp - } - - override protected def tearDown: Unit = { - super.tearDown - TestingHDFSServer.stop - } - - override protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = { - var store: DFSLevelDBStore = new DFSLevelDBStore - store.setDirectory(new File("target/activemq-data/haleveldb")) - store.setDfsDirectory("localhost") - if (delete) { - store.deleteAllMessages - } - return store - } -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/EnqueueRateScenariosTest.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/EnqueueRateScenariosTest.scala deleted file mode 100644 index e185cc8645..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/EnqueueRateScenariosTest.scala +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import junit.framework.TestCase -import org.apache.activemq.broker._ -import org.apache.activemq.store._ -import java.io.File -import junit.framework.Assert._ -import org.apache.commons.math.stat.descriptive.DescriptiveStatistics -import region.policy.{PolicyEntry, PolicyMap} -import org.apache.activemq.leveldb.{LevelDBStore} - -/** - *- *
- * - * @author Hiram Chirino - */ -class EnqueueRateScenariosTest extends TestCase { - - var broker: BrokerService = null - - override def setUp() { - import collection.JavaConversions._ - broker = new BrokerService - broker.setDeleteAllMessagesOnStartup(true) - broker.setPersistenceAdapter(createStore) - broker.addConnector("tcp://0.0.0.0:0") -// val policies = new PolicyMap(); -// val entry = new PolicyEntry -// entry.setQueue(">") -// policies.setPolicyEntries(List(entry)) -// broker.setDestinationPolicy(policies) - broker.start - broker.waitUntilStarted() - } - - override def tearDown() = { - if (broker != null) { - broker.stop - broker.waitUntilStopped - } - } - - protected def canceledEnqueues() = - broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowCanceledCounter - - protected def enqueueOptimized() = - broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueDelayReqested - - protected def enqueueNotOptimized() = - broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueNodelayReqested - - - protected def createStore: PersistenceAdapter = { - var store: LevelDBStore = new LevelDBStore - store.setDirectory(new File("target/activemq-data/leveldb")) - return store - } - - def collect_benchmark(scenario:ActiveMQScenario, warmup:Int, samples_count:Int) = { - val (cancels, optimized, unoptimized) = scenario.with_load { - println("Warming up for %d seconds...".format(warmup)) - Thread.sleep(warmup*1000) - println("Sampling...") - scenario.collection_start - val cancelStart = canceledEnqueues - val enqueueOptimizedStart = enqueueOptimized - val enqueueNotOptimizedStart = enqueueNotOptimized - for (i <- 0 until samples_count) { - Thread.sleep(1000); - scenario.collection_sample - } - (canceledEnqueues-cancelStart, enqueueOptimized-enqueueOptimizedStart, enqueueNotOptimized-enqueueNotOptimizedStart) - } - println("Done.") - - var samples = scenario.collection_end - val error_rates = samples.get("e_custom").get.map(_._2) - assertFalse("Errors occured during scenario run: "+error_rates, error_rates.find(_ > 0 ).isDefined ) - - val producer_stats = new DescriptiveStatistics(); - for( producer_rates <- samples.get("p_custom") ) { - for( i <- producer_rates ) { - producer_stats.addValue(i._2) - } - } - - val consumer_stats = new DescriptiveStatistics(); - for( consumer_rates <- samples.get("c_custom") ) { - for( i <- consumer_rates ) { - consumer_stats.addValue(i._2) - } - } - - (producer_stats, consumer_stats, cancels*1.0/samples_count, optimized*1.0/samples_count, unoptimized*1.0/samples_count) - } - - def benchmark(name:String, warmup:Int=3, samples_count:Int=15, async_send:Boolean=true)(setup:(ActiveMQScenario)=>Unit) = { - println("Benchmarking: "+name) - var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend="+async_send - val url = broker.getTransportConnectors.get(0).getConnectUri + options - - val scenario = new ActiveMQScenario - scenario.url = url - scenario.display_errors = true - scenario.persistent = true - scenario.message_size = 1024 * 3 - - setup(scenario) - val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = collect_benchmark(scenario, warmup, samples_count) - - println("%s: producer avg msg/sec: %,.2f, stddev: %,.2f".format(name, producer_stats.getMean, producer_stats.getStandardDeviation)) - println("%s: consumer avg msg/sec: %,.2f, stddev: %,.2f".format(name, consumer_stats.getMean, consumer_stats.getStandardDeviation)) - println("%s: canceled enqueues/sec: %,.2f".format(name,cancels)) - println("%s: 
optimized enqueues/sec: %,.2f".format(name,optimized)) - println("%s: unoptimized enqueues/sec: %,.2f".format(name,unoptimized)) - - (producer_stats, consumer_stats, cancels, optimized, unoptimized) - } - - def testHighCancelRatio = { - val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = benchmark("both_connected_baseline") { scenario=> - scenario.producers = 1 - scenario.consumers = 1 - } - val cancel_ratio = cancels / producer_stats.getMean - assertTrue("Expecting more than 80%% of the enqueues get canceled. But only %.2f%% was canceled".format(cancel_ratio*100), cancel_ratio > .80) - } - - def testDecoupledProducerRate = { - - // Fill up the queue with messages.. for the benefit of the next benchmark.. - val from_1_to_0 = benchmark("from_1_to_0", 60) { scenario=> - scenario.producers = 1 - scenario.consumers = 0 - } - val from_1_to_10 = benchmark("from_1_to_10") { scenario=> - scenario.producers = 1 - scenario.consumers = 10 - } - val from_1_to_1 = benchmark("from_1_to_1") { scenario=> - scenario.producers = 1 - scenario.consumers = 1 - } - - var percent_diff0 = (1.0 - (from_1_to_0._1.getMean / from_1_to_1._1.getMean)).abs * 100 - var percent_diff1 = (1.0 - (from_1_to_1._1.getMean / from_1_to_10._1.getMean)).abs * 100 - - var msg0 = "The 0 vs 1 consumer scenario producer rate was within %.2f%%".format(percent_diff0) - var msg1 = "The 1 vs 10 consumer scenario producer rate was within %.2f%%".format(percent_diff1) - - println(msg0) - println(msg1) - - assertTrue(msg0, percent_diff0 <= 60) - assertTrue(msg1, percent_diff1 <= 20) - } - -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/IDERunner.java b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/IDERunner.java deleted file mode 100644 index ddf01d3749..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/IDERunner.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.test; - -import org.apache.activemq.broker.BrokerService; -import org.apache.activemq.leveldb.LevelDBStore; - -import java.io.File; - -public class IDERunner { - - public static void main(String[]args) throws Exception { - BrokerService bs = new BrokerService(); - bs.addConnector("tcp://localhost:61616"); - LevelDBStore store = new LevelDBStore(); - store.setDirectory(new File("target/activemq-data/haleveldb")); - bs.setPersistenceAdapter(store); - bs.deleteAllMessages(); - bs.start(); - bs.waitUntilStopped(); - } -} diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/JMSClientScenario.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/JMSClientScenario.scala deleted file mode 100644 index d852396d23..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/JMSClientScenario.scala +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import java.lang.Thread -import javax.jms._ - -/** - *- * Simulates load on a JMS server using the JMS messaging API. - *
- * - * @author Hiram Chirino - */ -abstract class JMSClientScenario extends Scenario { - - def createProducer(i:Int) = { - new ProducerClient(i) - } - def createConsumer(i:Int) = { - new ConsumerClient(i) - } - - protected def destination(i:Int):Destination - - def indexed_destination_name(i:Int) = destination_type match { - case "queue" => queue_prefix+destination_name+"-"+(i%destination_count) - case "topic" => topic_prefix+destination_name+"-"+(i%destination_count) - case _ => sys.error("Unsuported destination type: "+destination_type) - } - - - protected def factory:ConnectionFactory - - def jms_ack_mode = { - ack_mode match { - case "auto" => Session.AUTO_ACKNOWLEDGE - case "client" => Session.CLIENT_ACKNOWLEDGE - case "dups_ok" => Session.DUPS_OK_ACKNOWLEDGE - case "transacted" => Session.SESSION_TRANSACTED - case _ => throw new Exception("Invalid ack mode: "+ack_mode) - } - } - - trait JMSClient extends Client { - - @volatile - var connection:Connection = _ - var message_counter=0L - - var worker = new Thread() { - override def run() { - var reconnect_delay = 0 - while( !done.get ) { - try { - - if( reconnect_delay!=0 ) { - Thread.sleep(reconnect_delay) - reconnect_delay=0 - } - connection = factory.createConnection(user_name, password) -// connection.setClientID(name) - connection.setExceptionListener(new ExceptionListener { - def onException(exception: JMSException) { - } - }) - connection.start() - - execute - - } catch { - case e:Throwable => - if( !done.get ) { - if( display_errors ) { - e.printStackTrace - } - error_counter.incrementAndGet - reconnect_delay = 1000 - } - } finally { - dispose - } - } - } - } - - def dispose { - try { - connection.close() - } catch { - case _:Throwable => - } - } - - def execute:Unit - - def start = { - worker.start - } - - def shutdown = { - assert(done.get) - if ( worker!=null ) { - dispose - worker.join(1000) - while(worker.isAlive ) { - println("Worker did not shutdown quickly.. 
interrupting thread.") - worker.interrupt() - worker.join(1000) - } - worker = null - } - } - - def name:String - } - - class ConsumerClient(val id: Int) extends JMSClient { - val name: String = "consumer " + id - - def execute { - var session = connection.createSession(false, jms_ack_mode) - var consumer:MessageConsumer = if( durable ) { - session.createDurableSubscriber(destination(id).asInstanceOf[Topic], name, selector, no_local) - } else { - session.createConsumer(destination(id), selector, no_local) - } - - while( !done.get() ) { - val msg = consumer.receive(500) - if( msg!=null ) { - consumer_counter.incrementAndGet() - if (consumer_sleep != 0) { - Thread.sleep(consumer_sleep) - } - if(session.getAcknowledgeMode == Session.CLIENT_ACKNOWLEDGE) { - msg.acknowledge(); - } - } - } - } - - } - - class ProducerClient(val id: Int) extends JMSClient { - - val name: String = "producer " + id - - def execute { - val session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE) - val producer:MessageProducer = session.createProducer(destination(id)) - producer.setDeliveryMode(if( persistent ) { - DeliveryMode.PERSISTENT - } else { - DeliveryMode.NON_PERSISTENT - }) - - val msg = session.createTextMessage(body(name)) - headers_for(id).foreach { case (key, value) => - msg.setStringProperty(key, value) - } - - while( !done.get() ) { - producer.send(msg) - producer_counter.incrementAndGet() - if (producer_sleep != 0) { - Thread.sleep(producer_sleep) - } - } - - } - } - - def body(name:String) = { - val buffer = new StringBuffer(message_size) - buffer.append("Message from " + name+"\n") - for( i <- buffer.length to message_size ) { - buffer.append(('a'+(i%26)).toChar) - } - var rc = buffer.toString - if( rc.length > message_size ) { - rc.substring(0, message_size) - } else { - rc - } - } - - - -} diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBFastEnqueueTest.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBFastEnqueueTest.scala deleted file mode 100644 index b63b920f3d..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBFastEnqueueTest.scala +++ /dev/null @@ -1,209 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.activemq.leveldb.test - -import org.apache.activemq.ActiveMQConnection -import org.apache.activemq.ActiveMQConnectionFactory -import org.apache.activemq.broker.BrokerService -import org.apache.activemq.command.ActiveMQQueue -import org.apache.activemq.command.ConnectionControl -import org.junit.After -import org.junit.Before -import org.junit.Test -import javax.jms._ -import java.io.File -import java.util.Vector -import java.util.concurrent.ExecutorService -import java.util.concurrent.Executors -import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicLong -import junit.framework.Assert._ -import org.apache.activemq.leveldb.util.Log -import junit.framework.TestCase -import org.apache.activemq.leveldb.LevelDBStore - -object LevelDBFastEnqueueTest extends Log -class LevelDBFastEnqueueTest extends TestCase { - - import LevelDBFastEnqueueTest._ - - @Test def testPublishNoConsumer: Unit = { - startBroker(true, 10) - val sharedCount: AtomicLong = new AtomicLong(toSend) - var start: Long = System.currentTimeMillis - var executorService: ExecutorService = Executors.newCachedThreadPool - var i: Int = 0 - while (i < parallelProducer) { - executorService.execute(new Runnable { - def run: Unit = { - try { - publishMessages(sharedCount, 0) - } - catch { - case e: Exception => { - exceptions.add(e) - } - } - } - }) - i += 1 - } - executorService.shutdown - executorService.awaitTermination(30, TimeUnit.MINUTES) - assertTrue("Producers done in time", executorService.isTerminated) - assertTrue("No exceptions: " + exceptions, exceptions.isEmpty) - var totalSent: Long = toSend * payloadString.length - var duration: Double = System.currentTimeMillis - start - info("Duration: " + duration + "ms") - info("Rate: " + (toSend * 1000 / duration) + "m/s") - info("Total send: " + totalSent) - info("Total journal write: " + store.getLogAppendPosition) - info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%") - stopBroker - restartBroker(0, 1200000) - consumeMessages(toSend) - } - - @Test def testPublishNoConsumerNoCheckpoint: Unit = { - toSend = 100 - startBroker(true, 0) - val sharedCount: AtomicLong = new AtomicLong(toSend) - var start: Long = System.currentTimeMillis - var executorService: ExecutorService = Executors.newCachedThreadPool - var i: Int = 0 - while (i < parallelProducer) { - executorService.execute(new Runnable { - def run: Unit = { - try { - publishMessages(sharedCount, 0) - } - catch { - case e: Exception => { - exceptions.add(e) - } - } - } - }) - i += 1; - } - executorService.shutdown - executorService.awaitTermination(30, TimeUnit.MINUTES) - assertTrue("Producers done in time", executorService.isTerminated) - assertTrue("No exceptions: " + exceptions, exceptions.isEmpty) - var totalSent: Long = toSend * payloadString.length - broker.getAdminView.gc - var duration: Double = System.currentTimeMillis - start - info("Duration: " + duration + "ms") - info("Rate: " + (toSend * 1000 / duration) + "m/s") - info("Total send: " + totalSent) - info("Total journal write: " + store.getLogAppendPosition) - info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%") - stopBroker - restartBroker(0, 0) - consumeMessages(toSend) - } - - private def consumeMessages(count: Long): Unit = { - var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection] - connection.setWatchTopicAdvisories(false) - connection.start - var session: Session = 
connection.createSession(false, Session.AUTO_ACKNOWLEDGE) - var consumer: MessageConsumer = session.createConsumer(destination) - var i: Int = 0 - while (i < count) { - assertNotNull("got message " + i, consumer.receive(10000)) - i += 1; - } - assertNull("none left over", consumer.receive(2000)) - } - - protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = { - stopBroker - TimeUnit.MILLISECONDS.sleep(restartDelay) - startBroker(false, checkpoint) - } - - override def tearDown() = stopBroker - - def stopBroker: Unit = { - if (broker != null) { - broker.stop - broker.waitUntilStopped - } - } - - private def publishMessages(count: AtomicLong, expiry: Int): Unit = { - var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection] - connection.setWatchTopicAdvisories(false) - connection.start - var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE) - var producer: MessageProducer = session.createProducer(destination) - var start: Long = System.currentTimeMillis - var i: Long = 0l - var bytes: Array[Byte] = payloadString.getBytes - while ((({ - i = count.getAndDecrement; i - })) > 0) { - var message: Message = null - if (useBytesMessage) { - message = session.createBytesMessage - (message.asInstanceOf[BytesMessage]).writeBytes(bytes) - } - else { - message = session.createTextMessage(payloadString) - } - producer.send(message, DeliveryMode.PERSISTENT, 5, expiry) - if (i != toSend && i % sampleRate == 0) { - var now: Long = System.currentTimeMillis - info("Remainder: " + i + ", rate: " + sampleRate * 1000 / (now - start) + "m/s") - start = now - } - } - connection.syncSendPacket(new ConnectionControl) - connection.close - } - - def startBroker(deleteAllMessages: Boolean, checkPointPeriod: Int): Unit = { - broker = new BrokerService - broker.setDeleteAllMessagesOnStartup(deleteAllMessages) - store = createStore - broker.setPersistenceAdapter(store) - broker.addConnector("tcp://0.0.0.0:0") - broker.start - var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend=true&jms.alwaysSessionAsync=false&jms.dispatchAsync=false&socketBufferSize=131072&ioBufferSize=16384&wireFormat.tightEncodingEnabled=false&wireFormat.cacheSize=8192" - connectionFactory = new ActiveMQConnectionFactory(broker.getTransportConnectors.get(0).getConnectUri + options) - } - - - - protected def createStore: LevelDBStore = { - var store: LevelDBStore = new LevelDBStore - store.setDirectory(new File("target/activemq-data/leveldb")) - return store - } - - private[leveldb] var broker: BrokerService = null - private[leveldb] var connectionFactory: ActiveMQConnectionFactory = null - private[leveldb] var store: LevelDBStore = null - private[leveldb] var destination: Destination = new ActiveMQQueue("Test") - private[leveldb] var payloadString: String = new String(new Array[Byte](6 * 1024)) - private[leveldb] var useBytesMessage: Boolean = true - private[leveldb] final val parallelProducer: Int = 20 - private[leveldb] var exceptions: Vector[Exception] = new Vector[Exception] - private[leveldb] var toSend: Long = 100000 - private[leveldb] final val sampleRate: Double = 100000 -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBPlistTest.java b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBPlistTest.java deleted file mode 100644 index 78dd87df1a..0000000000 --- 
a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBPlistTest.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test; - -import org.apache.activemq.leveldb.LevelDBStore; -import org.apache.activemq.store.PListTestSupport; - -/** - * @author Hiram Chirino - */ -public class LevelDBPlistTest extends PListTestSupport { - - @Override - protected LevelDBStore createPListStore() { - return new LevelDBStore(); - } - - protected LevelDBStore createConcurrentAddIteratePListStore() { - return new LevelDBStore(); - } - - @Override - protected LevelDBStore createConcurrentAddRemovePListStore() { - return new LevelDBStore(); - } - - @Override - protected LevelDBStore createConcurrentAddRemoveWithPreloadPListStore() { - return new LevelDBStore(); - } - - @Override - protected LevelDBStore createConcurrentAddIterateRemovePListStore(boolean enablePageCache) { - return new LevelDBStore(); - } - -} diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBStoreTest.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBStoreTest.scala deleted file mode 100644 index 5ecdaf5bad..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/LevelDBStoreTest.scala +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import org.apache.activemq.store.PersistenceAdapter -import org.apache.activemq.store.PersistenceAdapterTestSupport -import java.io.File -import org.apache.activemq.leveldb.LevelDBStore - -/** - *- *
- * - * @author Hiram Chirino - */ -class LevelDBStoreTest extends PersistenceAdapterTestSupport { - override def testStoreCanHandleDupMessages: Unit = { - } - - protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = { - var store: LevelDBStore = new LevelDBStore - store.setDirectory(new File("target/activemq-data/haleveldb")) - if (delete) { - store.deleteAllMessages - } - return store - } -} \ No newline at end of file diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/PListTest.java b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/PListTest.java deleted file mode 100644 index 740102c56f..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/PListTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test; - - -import org.apache.activemq.ActiveMQConnectionFactory; -import org.apache.activemq.broker.BrokerService; -import org.apache.activemq.broker.region.policy.PolicyEntry; -import org.apache.activemq.broker.region.policy.PolicyMap; -import org.apache.activemq.leveldb.LevelDBStore; -import org.apache.activemq.store.PersistenceAdapter; -import org.apache.tools.ant.util.FileUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import javax.jms.Connection; -import javax.jms.DeliveryMode; -import javax.jms.MessageProducer; -import javax.jms.Session; -import java.io.File; - -public class PListTest { - - protected BrokerService brokerService; - - @Before - public void setUp() throws Exception { - brokerService = new BrokerService(); - brokerService.addConnector("tcp://localhost:0"); - - LevelDBStore store = new LevelDBStore(); - store.setDirectory(new File("target/activemq-data/haleveldb")); - store.deleteAllMessages(); - brokerService.setPersistenceAdapter(store); - - PolicyMap policyMap = new PolicyMap(); - PolicyEntry policy = new PolicyEntry(); - policy.setMemoryLimit(1); - policyMap.setDefaultEntry(policy); - brokerService.setDestinationPolicy(policyMap); - - brokerService.start(); - } - - @After - public void tearDown() throws Exception { - if (brokerService != null && brokerService.isStopped()) { - brokerService.stop(); - } - FileUtils.delete(new File("target/activemq-data/haleveldb")); - } - - @Test - public void testBrokerStop() throws Exception { - ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(brokerService.getTransportConnectors().get(0).getServer().getConnectURI().toString()); - Connection conn = factory.createConnection(); - Session sess = conn.createSession(false, Session.AUTO_ACKNOWLEDGE); - MessageProducer producer = sess.createProducer(sess.createQueue("TEST")); - 
producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT); - for (int i = 0; i < 10000; i++) { - producer.send(sess.createTextMessage(i + " message")); - } - brokerService.stop(); - brokerService.waitUntilStopped(); - } - -} diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/Scenario.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/Scenario.scala deleted file mode 100644 index 03fa7fa529..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/Scenario.scala +++ /dev/null @@ -1,331 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import java.util.concurrent.atomic._ -import java.util.concurrent.TimeUnit._ -import scala.collection.mutable.ListBuffer - -object Scenario { - val MESSAGE_ID:Array[Byte] = "message-id" - val NEWLINE = '\n'.toByte - val NANOS_PER_SECOND = NANOSECONDS.convert(1, SECONDS) - - implicit def toBytes(value: String):Array[Byte] = value.getBytes("UTF-8") - - def o[T](value:T):Option[T] = value match { - case null => None - case x => Some(x) - } -} - -trait Scenario { - import Scenario._ - - var url:String = "tcp://localhost:61616" - var user_name:String = _ - var password:String = _ - - private var _producer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} } - def producer_sleep = _producer_sleep() - def producer_sleep_= (new_value: Int) = _producer_sleep = new { def apply() = new_value; def init(time: Long) {} } - def producer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _producer_sleep = new_func - - private var _consumer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} } - def consumer_sleep = _consumer_sleep() - def consumer_sleep_= (new_value: Int) = _consumer_sleep = new { def apply() = new_value; def init(time: Long) {} } - def consumer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _consumer_sleep = new_func - - var producers = 1 - var producers_per_sample = 0 - - var consumers = 1 - var consumers_per_sample = 0 - var sample_interval = 1000 - - var message_size = 1024 - var persistent = false - - var headers = Array[Array[(String,String)]]() - var selector:String = null - var no_local = false - var durable = false - var ack_mode = "auto" - var messages_per_connection = -1L - var display_errors = false - - var destination_type = "queue" - private var _destination_name: () => String = () => "load" - def destination_name = _destination_name() - def destination_name_=(new_name: String) = _destination_name = () => new_name - def destination_name_=(new_func: () => String) = _destination_name = new_func - var destination_count = 1 - - val 
producer_counter = new AtomicLong() - val consumer_counter = new AtomicLong() - val error_counter = new AtomicLong() - val done = new AtomicBoolean() - - var queue_prefix = "" - var topic_prefix = "" - var name = "custom" - - var drain_timeout = 2000L - - def run() = { - print(toString) - println("--------------------------------------") - println(" Running: Press ENTER to stop") - println("--------------------------------------") - println("") - - with_load { - - // start a sampling client... - val sample_thread = new Thread() { - override def run() = { - - def print_rate(name: String, periodCount:Long, totalCount:Long, nanos: Long) = { - - val rate_per_second: java.lang.Float = ((1.0f * periodCount / nanos) * NANOS_PER_SECOND) - println("%s total: %,d, rate: %,.3f per second".format(name, totalCount, rate_per_second)) - } - - try { - var start = System.nanoTime - var total_producer_count = 0L - var total_consumer_count = 0L - var total_error_count = 0L - collection_start - while( !done.get ) { - Thread.sleep(sample_interval) - val end = System.nanoTime - collection_sample - val samples = collection_end - samples.get("p_custom").foreach { case (_, count)::Nil => - total_producer_count += count - print_rate("Producer", count, total_producer_count, end - start) - case _ => - } - samples.get("c_custom").foreach { case (_, count)::Nil => - total_consumer_count += count - print_rate("Consumer", count, total_consumer_count, end - start) - case _ => - } - samples.get("e_custom").foreach { case (_, count)::Nil => - if( count!= 0 ) { - total_error_count += count - print_rate("Error", count, total_error_count, end - start) - } - case _ => - } - start = end - } - } catch { - case e:InterruptedException => - } - } - } - sample_thread.start() - - System.in.read() - done.set(true) - - sample_thread.interrupt - sample_thread.join - } - - } - - override def toString() = { - "--------------------------------------\n"+ - "Scenario Settings\n"+ - "--------------------------------------\n"+ - " destination_type = "+destination_type+"\n"+ - " queue_prefix = "+queue_prefix+"\n"+ - " topic_prefix = "+topic_prefix+"\n"+ - " destination_count = "+destination_count+"\n" + - " destination_name = "+destination_name+"\n" + - " sample_interval (ms) = "+sample_interval+"\n" + - " \n"+ - " --- Producer Properties ---\n"+ - " producers = "+producers+"\n"+ - " message_size = "+message_size+"\n"+ - " persistent = "+persistent+"\n"+ - " producer_sleep (ms) = "+producer_sleep+"\n"+ - " headers = "+headers.mkString(", ")+"\n"+ - " \n"+ - " --- Consumer Properties ---\n"+ - " consumers = "+consumers+"\n"+ - " consumer_sleep (ms) = "+consumer_sleep+"\n"+ - " selector = "+selector+"\n"+ - " durable = "+durable+"\n"+ - "" - - } - - protected def headers_for(i:Int) = { - if ( headers.isEmpty ) { - Array[(String, String)]() - } else { - headers(i%headers.size) - } - } - - var producer_samples:Option[ListBuffer[(Long,Long)]] = None - var consumer_samples:Option[ListBuffer[(Long,Long)]] = None - var error_samples = ListBuffer[(Long,Long)]() - - def collection_start: Unit = { - producer_counter.set(0) - consumer_counter.set(0) - error_counter.set(0) - - producer_samples = if (producers > 0 || producers_per_sample>0 ) { - Some(ListBuffer[(Long,Long)]()) - } else { - None - } - consumer_samples = if (consumers > 0 || consumers_per_sample>0 ) { - Some(ListBuffer[(Long,Long)]()) - } else { - None - } - } - - def collection_end: Map[String, scala.List[(Long,Long)]] = { - var rc = Map[String, List[(Long,Long)]]() - producer_samples.foreach{ 
samples => - rc += "p_"+name -> samples.toList - samples.clear - } - consumer_samples.foreach{ samples => - rc += "c_"+name -> samples.toList - samples.clear - } - rc += "e_"+name -> error_samples.toList - error_samples.clear - rc - } - - trait Client { - def start():Unit - def shutdown():Unit - } - - var producer_clients = List[Client]() - var consumer_clients = List[Client]() - - def with_load[T](func: =>T ):T = { - done.set(false) - - _producer_sleep.init(System.currentTimeMillis()) - _consumer_sleep.init(System.currentTimeMillis()) - - for (i <- 0 until producers) { - val client = createProducer(i) - producer_clients ::= client - client.start() - } - - for (i <- 0 until consumers) { - val client = createConsumer(i) - consumer_clients ::= client - client.start() - } - - try { - func - } finally { - done.set(true) - // wait for the threads to finish.. - for( client <- consumer_clients ) { - client.shutdown - } - consumer_clients = List() - for( client <- producer_clients ) { - client.shutdown - } - producer_clients = List() - } - } - - def drain = { - done.set(false) - if( destination_type=="queue" || destination_type=="raw_queue" || durable==true ) { - print("draining") - consumer_counter.set(0) - var consumer_clients = List[Client]() - for (i <- 0 until destination_count) { - val client = createConsumer(i) - consumer_clients ::= client - client.start() - } - - // Keep sleeping until we stop draining messages. - var drained = 0L - try { - Thread.sleep(drain_timeout); - def done() = { - val c = consumer_counter.getAndSet(0) - drained += c - c == 0 - } - while( !done ) { - print(".") - Thread.sleep(drain_timeout); - } - } finally { - done.set(true) - for( client <- consumer_clients ) { - client.shutdown - } - println(". (drained %d)".format(drained)) - } - } - } - - - def collection_sample: Unit = { - - val now = System.currentTimeMillis() - producer_samples.foreach(_.append((now, producer_counter.getAndSet(0)))) - consumer_samples.foreach(_.append((now, consumer_counter.getAndSet(0)))) - error_samples.append((now, error_counter.getAndSet(0))) - - // we might need to increment number the producers.. - for (i <- 0 until producers_per_sample) { - val client = createProducer(producer_clients.length) - producer_clients ::= client - client.start() - } - - // we might need to increment number the consumers.. - for (i <- 0 until consumers_per_sample) { - val client = createConsumer(consumer_clients.length) - consumer_clients ::= client - client.start() - } - - } - - def createProducer(i:Int):Client - def createConsumer(i:Int):Client - -} - - diff --git a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/TestingHDFSServer.scala b/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/TestingHDFSServer.scala deleted file mode 100644 index 02614ec9fe..0000000000 --- a/activemq-leveldb-store/src/test/scala/org/apache/activemq/leveldb/test/TestingHDFSServer.scala +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
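The Scenario trait removed above drives producer and consumer load and samples message rates once per interval; ActiveMQScenario (earlier in this patch) supplies the ActiveMQ connection factory and destinations. A sketch of driving it directly, assuming a placeholder broker URL:

```scala
// Sketch only: driving the removed load-generation scenario programmatically.
import org.apache.activemq.leveldb.test.ActiveMQScenario

object ScenarioSketch {
  def main(args: Array[String]): Unit = {
    val scenario = new ActiveMQScenario
    scenario.url = "tcp://localhost:61616"     // placeholder broker address
    scenario.destination_type = "queue"
    scenario.destination_name = "load"
    scenario.producers = 2
    scenario.consumers = 2
    scenario.message_size = 1024
    scenario.persistent = true

    // with_load starts the producer/consumer clients, runs the block, then shuts them down
    scenario.with_load {
      scenario.collection_start
      Thread.sleep(10 * 1000)                  // let the clients run for a while
      scenario.collection_sample
      val samples = scenario.collection_end
      println("producer samples: " + samples.get("p_custom"))
      println("consumer samples: " + samples.get("c_custom"))
    }
  }
}
```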
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.activemq.leveldb.test - -import org.apache.hadoop.conf.Configuration -import org.apache.hadoop.fs.FileSystem -import org.apache.hadoop.hdfs.MiniDFSCluster -import java.io.IOException - -/** - *- *
- * - * @author Hiram Chirino - */ -object TestingHDFSServer { - private[leveldb] def start: Unit = { - var conf: Configuration = new Configuration - cluster = new MiniDFSCluster(conf, 1, true, null) - cluster.waitActive - fs = cluster.getFileSystem - } - - private[leveldb] def stop: Unit = { - try { - cluster.shutdown - } - catch { - case e: Throwable => { - e.printStackTrace - } - } - } - - private[leveldb] var cluster: MiniDFSCluster = null - private[leveldb] var fs: FileSystem = null -} \ No newline at end of file diff --git a/activemq-osgi/pom.xml b/activemq-osgi/pom.xml index cee87ba65e..06549cd35e 100644 --- a/activemq-osgi/pom.xml +++ b/activemq-osgi/pom.xml @@ -60,13 +60,11 @@ org.jasypt*;resolution:=optional, org.eclipse.jetty*;resolution:=optional;version="[9.0,10)", org.apache.zookeeper*;resolution:=optional, - org.fusesource.leveldbjni*;resolution:=optional, org.fusesource.hawtjni*;resolution:=optional, org.springframework.jms*;version="[4,5)";resolution:=optional, org.springframework.transaction*;version="[4,5)";resolution:=optional, org.springframework*;version="[4,5)";resolution:=optional, org.xmlpull*;resolution:=optional, - scala*;resolution:=optional, javax.annotation*;version="[1,4)", !com.thoughtworks.qdox*, org.apache.commons.logging;version="[1.2,2)";resolution:=optional, @@ -125,11 +123,6 @@