Fixes AMQ-4080: Integrate the Fusesource LevelDB module into the ActiveMQ build.

git-svn-id: https://svn.apache.org/repos/asf/activemq/trunk@1389882 13f79535-47bb-0310-9956-ffa450edef68
Hiram R. Chirino 2012-09-25 14:32:28 +00:00
parent f3c9c74334
commit b20d5411d1
42 changed files with 22728 additions and 2 deletions

View File

@@ -1056,6 +1056,8 @@
</execution>
</executions>
</plugin>
<!-- disabled until the xbean 3.11.2 plugin is released -->
<!--
<plugin>
<groupId>org.apache.xbean</groupId>
<artifactId>maven-xbean-plugin</artifactId>
@@ -1064,6 +1066,9 @@
<execution>
<phase>process-classes</phase>
<configuration>
<includes>
<include>${basedir}/../activemq-leveldb/src/main/scala</include>
</includes>
<strictXsdOrder>false</strictXsdOrder>
<namespace>http://activemq.apache.org/schema/core</namespace>
<schema>${basedir}/target/classes/activemq.xsd</schema>
@@ -1084,6 +1089,7 @@
</dependency>
</dependencies>
</plugin>
-->
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
@@ -1222,5 +1228,47 @@
</plugins>
</build>
</profile>
<!-- To generate the XBean meta-data, run: mvn -P xbean-generate clean process-classes -->
<profile>
<id>xbean-generate</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.xbean</groupId>
<artifactId>maven-xbean-plugin</artifactId>
<version>3.11.2-SNAPSHOT</version>
<executions>
<execution>
<phase>process-classes</phase>
<configuration>
<includes>
<include>${basedir}/../activemq-leveldb/src/main/java</include>
</includes>
<classPathIncludes>
<classPathInclude>${basedir}/../activemq-leveldb/target/classes</classPathInclude>
</classPathIncludes>
<strictXsdOrder>false</strictXsdOrder>
<namespace>http://activemq.apache.org/schema/core</namespace>
<schema>${basedir}/src/main/resources/activemq.xsd</schema>
<outputDir>${basedir}/src/main/resources</outputDir>
<generateSpringSchemasFile>false</generateSpringSchemasFile>
<excludedClasses>org.apache.activemq.broker.jmx.AnnotatedMBean,org.apache.activemq.broker.jmx.DestinationViewMBean</excludedClasses>
</configuration>
<goals>
<goal>mapping</goal>
</goals>
</execution>
</executions>
<dependencies>
<dependency>
<groupId>com.thoughtworks.qdox</groupId>
<artifactId>qdox</artifactId>
<version>1.12</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -0,0 +1,381 @@
# NOTE: this file is autogenerated by Apache XBean
# beans
abortSlowConsumerStrategy = org.apache.activemq.broker.region.policy.AbortSlowConsumerStrategy
amqPersistenceAdapter = org.apache.activemq.store.amq.AMQPersistenceAdapter
amqPersistenceAdapter.indexPageSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
amqPersistenceAdapter.maxCheckpointMessageAddSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
amqPersistenceAdapter.maxFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
amqPersistenceAdapter.maxReferenceFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
amqPersistenceAdapterFactory = org.apache.activemq.store.amq.AMQPersistenceAdapterFactory
authenticationUser = org.apache.activemq.security.AuthenticationUser
org.apache.activemq.security.AuthenticationUser(java.lang.String,java.lang.String,java.lang.String).parameterNames = username password groups
authorizationEntry = org.apache.activemq.security.AuthorizationEntry
authorizationMap = org.apache.activemq.security.DefaultAuthorizationMap
org.apache.activemq.security.DefaultAuthorizationMap(java.util.List).parameterNames = authorizationEntries
authorizationPlugin = org.apache.activemq.security.AuthorizationPlugin
org.apache.activemq.security.AuthorizationPlugin(org.apache.activemq.security.AuthorizationMap).parameterNames = map
axionJDBCAdapter = org.apache.activemq.store.jdbc.adapter.AxionJDBCAdapter
blobJDBCAdapter = org.apache.activemq.store.jdbc.adapter.BlobJDBCAdapter
broker = org.apache.activemq.xbean.XBeanBrokerService
broker.initMethod = afterPropertiesSet
broker.destroyMethod = destroy
broker.advisorySupport.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.deleteAllMessagesOnStartup.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.passiveSlave.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.persistent.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.schedulerSupport.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.shutdownOnSlaveFailure.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.systemExitOnShutdown.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.useJmx.propertyEditor = org.apache.activemq.util.BooleanEditor
broker.waitForSlave.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService = org.apache.activemq.broker.BrokerService
brokerService.advisorySupport.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.deleteAllMessagesOnStartup.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.passiveSlave.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.persistent.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.schedulerSupport.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.shutdownOnSlaveFailure.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.systemExitOnShutdown.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.useJmx.propertyEditor = org.apache.activemq.util.BooleanEditor
brokerService.waitForSlave.propertyEditor = org.apache.activemq.util.BooleanEditor
bytesJDBCAdapter = org.apache.activemq.store.jdbc.adapter.BytesJDBCAdapter
cachedLDAPAuthorizationMap = org.apache.activemq.security.CachedLDAPAuthorizationMap
commandAgent = org.apache.activemq.broker.util.CommandAgent
commandAgent.initMethod = start
commandAgent.destroyMethod = stop
compositeDemandForwardingBridge = org.apache.activemq.network.CompositeDemandForwardingBridge
org.apache.activemq.network.CompositeDemandForwardingBridge(org.apache.activemq.network.NetworkBridgeConfiguration,org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = configuration localBroker remoteBroker
compositeQueue = org.apache.activemq.broker.region.virtual.CompositeQueue
compositeTopic = org.apache.activemq.broker.region.virtual.CompositeTopic
conditionalNetworkBridgeFilterFactory = org.apache.activemq.network.ConditionalNetworkBridgeFilterFactory
connectionDotFilePlugin = org.apache.activemq.broker.view.ConnectionDotFilePlugin
connectionFactory = org.apache.activemq.spring.ActiveMQConnectionFactory
connectionFactory.initMethod = afterPropertiesSet
constantPendingMessageLimitStrategy = org.apache.activemq.broker.region.policy.ConstantPendingMessageLimitStrategy
database-locker = org.apache.activemq.store.jdbc.DefaultDatabaseLocker
db2JDBCAdapter = org.apache.activemq.store.jdbc.adapter.DB2JDBCAdapter
defaultIOExceptionHandler = org.apache.activemq.util.DefaultIOExceptionHandler
defaultJDBCAdapter = org.apache.activemq.store.jdbc.adapter.DefaultJDBCAdapter
defaultNetworkBridgeFilterFactory = org.apache.activemq.network.DefaultNetworkBridgeFilterFactory
defaultUsageCapacity = org.apache.activemq.usage.DefaultUsageCapacity
demandForwardingBridge = org.apache.activemq.network.DemandForwardingBridge
org.apache.activemq.network.DemandForwardingBridge(org.apache.activemq.network.NetworkBridgeConfiguration,org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = configuration localBroker remoteBroker
destinationDotFilePlugin = org.apache.activemq.broker.view.DestinationDotFilePlugin
destinationEntry = org.apache.activemq.filter.DefaultDestinationMapEntry
destinationPathSeparatorPlugin = org.apache.activemq.broker.util.DestinationPathSeparatorBroker
discardingDLQBrokerPlugin = org.apache.activemq.plugin.DiscardingDLQBrokerPlugin
fileCursor = org.apache.activemq.broker.region.policy.FilePendingSubscriberMessageStoragePolicy
fileDurableSubscriberCursor = org.apache.activemq.broker.region.policy.FilePendingDurableSubscriberMessageStoragePolicy
fileQueueCursor = org.apache.activemq.broker.region.policy.FilePendingQueueMessageStoragePolicy
filteredDestination = org.apache.activemq.broker.region.virtual.FilteredDestination
filteredKahaDB = org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter
org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter(org.apache.activemq.command.ActiveMQDestination,org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter).parameterNames = destination adapter
fixedCountSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.FixedCountSubscriptionRecoveryPolicy
fixedSizedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.FixedSizedSubscriptionRecoveryPolicy
forcePersistencyModeBroker = org.apache.activemq.plugin.ForcePersistencyModeBroker
org.apache.activemq.plugin.ForcePersistencyModeBroker(org.apache.activemq.broker.Broker).parameterNames = next
forcePersistencyModeBrokerPlugin = org.apache.activemq.plugin.ForcePersistencyModeBrokerPlugin
forwardingBridge = org.apache.activemq.network.ForwardingBridge
org.apache.activemq.network.ForwardingBridge(org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = localBroker remoteBroker
hsqldb-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.HsqldbJDBCAdapter
imageBasedJDBCAdaptor = org.apache.activemq.store.jdbc.adapter.ImageBasedJDBCAdaptor
inboundQueueBridge = org.apache.activemq.network.jms.InboundQueueBridge
org.apache.activemq.network.jms.InboundQueueBridge(java.lang.String).parameterNames = inboundQueueName
inboundTopicBridge = org.apache.activemq.network.jms.InboundTopicBridge
org.apache.activemq.network.jms.InboundTopicBridge(java.lang.String).parameterNames = inboundTopicName
individualDeadLetterStrategy = org.apache.activemq.broker.region.policy.IndividualDeadLetterStrategy
informixJDBCAdapter = org.apache.activemq.store.jdbc.adapter.InformixJDBCAdapter
jDBCIOExceptionHandler = org.apache.activemq.store.jdbc.JDBCIOExceptionHandler
jaasAuthenticationPlugin = org.apache.activemq.security.JaasAuthenticationPlugin
jaasCertificateAuthenticationPlugin = org.apache.activemq.security.JaasCertificateAuthenticationPlugin
jaasDualAuthenticationPlugin = org.apache.activemq.security.JaasDualAuthenticationPlugin
jdbcPersistenceAdapter = org.apache.activemq.store.jdbc.JDBCPersistenceAdapter
org.apache.activemq.store.jdbc.JDBCPersistenceAdapter(javax.sql.DataSource,org.apache.activemq.wireformat.WireFormat).parameterNames = ds wireFormat
jmsQueueConnector = org.apache.activemq.network.jms.JmsQueueConnector
jmsTopicConnector = org.apache.activemq.network.jms.JmsTopicConnector
journalPersistenceAdapter = org.apache.activemq.store.journal.JournalPersistenceAdapter
org.apache.activemq.store.journal.JournalPersistenceAdapter(org.apache.activeio.journal.Journal,org.apache.activemq.store.PersistenceAdapter,org.apache.activemq.thread.TaskRunnerFactory).parameterNames = journal longTermPersistence taskRunnerFactory
journalPersistenceAdapterFactory = org.apache.activemq.store.journal.JournalPersistenceAdapterFactory
journalPersistenceAdapterFactory.journalLogFileSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
journaledJDBC = org.apache.activemq.store.PersistenceAdapterFactoryBean
journaledJDBC.journalLogFileSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
kahaDB = org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter
kahaDB.indexCacheSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
kahaDB.indexWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
kahaDB.journalMaxFileLength.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
kahaDB.journalMaxWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
kahaPersistenceAdapter = org.apache.activemq.store.kahadaptor.KahaPersistenceAdapter
kahaPersistenceAdapter.maxDataFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
org.apache.activemq.store.kahadaptor.KahaPersistenceAdapter(java.util.concurrent.atomic.AtomicLong).parameterNames = size
lDAPAuthorizationMap = org.apache.activemq.security.LDAPAuthorizationMap
org.apache.activemq.security.LDAPAuthorizationMap(java.util.Map).parameterNames = options
lastImageSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.LastImageSubscriptionRecoveryPolicy
ldapNetworkConnector = org.apache.activemq.network.LdapNetworkConnector
ldapNetworkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
lease-database-locker = org.apache.activemq.store.jdbc.LeaseDatabaseLocker
levelDB = org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter
loggingBrokerPlugin = org.apache.activemq.broker.util.LoggingBrokerPlugin
loggingBrokerPlugin.initMethod = afterPropertiesSet
mKahaDB = org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter
mKahaDB.journalMaxFileLength.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
mKahaDB.journalWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
managementContext = org.apache.activemq.broker.jmx.ManagementContext
managementContext.connectorPort.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
managementContext.createConnector.propertyEditor = org.apache.activemq.util.BooleanEditor
managementContext.rmiServerPort.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
org.apache.activemq.broker.jmx.ManagementContext(javax.management.MBeanServer).parameterNames = server
masterConnector = org.apache.activemq.broker.ft.MasterConnector
org.apache.activemq.broker.ft.MasterConnector(java.lang.String).parameterNames = remoteUri
maxdb-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.MaxDBJDBCAdapter
memoryPersistenceAdapter = org.apache.activemq.store.memory.MemoryPersistenceAdapter
memoryUsage = org.apache.activemq.usage.MemoryUsage
memoryUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
memoryUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage).parameterNames = parent
org.apache.activemq.usage.MemoryUsage(java.lang.String).parameterNames = name
org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage,java.lang.String).parameterNames = parent name
org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage,java.lang.String,float).parameterNames = parent name portion
messageGroupHashBucketFactory = org.apache.activemq.broker.region.group.MessageGroupHashBucketFactory
mirroredQueue = org.apache.activemq.broker.region.virtual.MirroredQueue
multicastNetworkConnector = org.apache.activemq.network.MulticastNetworkConnector
multicastNetworkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
org.apache.activemq.network.MulticastNetworkConnector(java.net.URI).parameterNames = remoteURI
multicastTraceBrokerPlugin = org.apache.activemq.broker.util.MulticastTraceBrokerPlugin
mysql-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.MySqlJDBCAdapter
networkConnector = org.apache.activemq.network.DiscoveryNetworkConnector
networkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor
org.apache.activemq.network.DiscoveryNetworkConnector(java.net.URI).parameterNames = discoveryURI
noSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.NoSubscriptionRecoveryPolicy
oldestMessageEvictionStrategy = org.apache.activemq.broker.region.policy.OldestMessageEvictionStrategy
oldestMessageWithLowestPriorityEvictionStrategy = org.apache.activemq.broker.region.policy.OldestMessageWithLowestPriorityEvictionStrategy
oracleBlobJDBCAdapter = org.apache.activemq.store.jdbc.adapter.OracleBlobJDBCAdapter
oracleJDBCAdapter = org.apache.activemq.store.jdbc.adapter.OracleJDBCAdapter
outboundQueueBridge = org.apache.activemq.network.jms.OutboundQueueBridge
org.apache.activemq.network.jms.OutboundQueueBridge(java.lang.String).parameterNames = outboundQueueName
outboundTopicBridge = org.apache.activemq.network.jms.OutboundTopicBridge
org.apache.activemq.network.jms.OutboundTopicBridge(java.lang.String).parameterNames = outboundTopicName
pListStore = org.apache.activemq.store.kahadb.plist.PListStore
policyEntry = org.apache.activemq.broker.region.policy.PolicyEntry
policyEntry.memoryLimit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
policyMap = org.apache.activemq.broker.region.policy.PolicyMap
postgresql-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.PostgresqlJDBCAdapter
prefetchPolicy = org.apache.activemq.ActiveMQPrefetchPolicy
prefetchRatePendingMessageLimitStrategy = org.apache.activemq.broker.region.policy.PrefetchRatePendingMessageLimitStrategy
priorityNetworkDispatchPolicy = org.apache.activemq.broker.region.policy.PriorityNetworkDispatchPolicy
proxyConnector = org.apache.activemq.proxy.ProxyConnector
queryBasedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.QueryBasedSubscriptionRecoveryPolicy
queue = org.apache.activemq.command.ActiveMQQueue
org.apache.activemq.command.ActiveMQQueue(java.lang.String).parameterNames = name
queueDispatchSelector = org.apache.activemq.broker.region.QueueDispatchSelector
org.apache.activemq.broker.region.QueueDispatchSelector(org.apache.activemq.command.ActiveMQDestination).parameterNames = destination
reconnectionPolicy = org.apache.activemq.network.jms.ReconnectionPolicy
redeliveryPlugin = org.apache.activemq.broker.util.RedeliveryPlugin
redeliveryPolicy = org.apache.activemq.RedeliveryPolicy
redeliveryPolicyMap = org.apache.activemq.broker.region.policy.RedeliveryPolicyMap
roundRobinDispatchPolicy = org.apache.activemq.broker.region.policy.RoundRobinDispatchPolicy
shared-file-locker = org.apache.activemq.store.SharedFileLocker
sharedDeadLetterStrategy = org.apache.activemq.broker.region.policy.SharedDeadLetterStrategy
simpleAuthenticationPlugin = org.apache.activemq.security.SimpleAuthenticationPlugin
org.apache.activemq.security.SimpleAuthenticationPlugin(java.util.List).parameterNames = users
simpleAuthorizationMap = org.apache.activemq.security.SimpleAuthorizationMap
org.apache.activemq.security.SimpleAuthorizationMap(org.apache.activemq.filter.DestinationMap,org.apache.activemq.filter.DestinationMap,org.apache.activemq.filter.DestinationMap).parameterNames = writeACLs readACLs adminACLs
simpleDispatchPolicy = org.apache.activemq.broker.region.policy.SimpleDispatchPolicy
simpleDispatchSelector = org.apache.activemq.broker.region.policy.SimpleDispatchSelector
org.apache.activemq.broker.region.policy.SimpleDispatchSelector(org.apache.activemq.command.ActiveMQDestination).parameterNames = destination
simpleJmsMessageConvertor = org.apache.activemq.network.jms.SimpleJmsMessageConvertor
simpleMessageGroupMapFactory = org.apache.activemq.broker.region.group.SimpleMessageGroupMapFactory
sslContext = org.apache.activemq.spring.SpringSslContext
sslContext.initMethod = afterPropertiesSet
statements = org.apache.activemq.store.jdbc.Statements
statisticsBrokerPlugin = org.apache.activemq.plugin.StatisticsBrokerPlugin
storeCursor = org.apache.activemq.broker.region.policy.StorePendingQueueMessageStoragePolicy
storeDurableSubscriberCursor = org.apache.activemq.broker.region.policy.StorePendingDurableSubscriberMessageStoragePolicy
storeUsage = org.apache.activemq.usage.StoreUsage
storeUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
storeUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
org.apache.activemq.usage.StoreUsage(java.lang.String,org.apache.activemq.store.PersistenceAdapter).parameterNames = name store
org.apache.activemq.usage.StoreUsage(org.apache.activemq.usage.StoreUsage,java.lang.String).parameterNames = parent name
streamJDBCAdapter = org.apache.activemq.store.jdbc.adapter.StreamJDBCAdapter
strictOrderDispatchPolicy = org.apache.activemq.broker.region.policy.StrictOrderDispatchPolicy
sybase-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.SybaseJDBCAdapter
systemUsage = org.apache.activemq.usage.SystemUsage
org.apache.activemq.usage.SystemUsage(java.lang.String,org.apache.activemq.store.PersistenceAdapter,org.apache.activemq.store.kahadb.plist.PListStore).parameterNames = name adapter tempStore
org.apache.activemq.usage.SystemUsage(org.apache.activemq.usage.SystemUsage,java.lang.String).parameterNames = parent name
taskRunnerFactory = org.apache.activemq.thread.TaskRunnerFactory
org.apache.activemq.thread.TaskRunnerFactory(java.lang.String).parameterNames = name
org.apache.activemq.thread.TaskRunnerFactory(java.lang.String,int,boolean,int,boolean).parameterNames = name priority daemon maxIterationsPerRun dedicatedTaskRunner
org.apache.activemq.thread.TaskRunnerFactory(java.lang.String,int,boolean,int,boolean,int).parameterNames = name priority daemon maxIterationsPerRun dedicatedTaskRunner maxThreadPoolSize
tempDestinationAuthorizationEntry = org.apache.activemq.security.TempDestinationAuthorizationEntry
tempQueue = org.apache.activemq.command.ActiveMQTempQueue
org.apache.activemq.command.ActiveMQTempQueue(java.lang.String).parameterNames = name
org.apache.activemq.command.ActiveMQTempQueue(org.apache.activemq.command.ConnectionId,long).parameterNames = connectionId sequenceId
tempTopic = org.apache.activemq.command.ActiveMQTempTopic
org.apache.activemq.command.ActiveMQTempTopic(java.lang.String).parameterNames = name
org.apache.activemq.command.ActiveMQTempTopic(org.apache.activemq.command.ConnectionId,long).parameterNames = connectionId sequenceId
tempUsage = org.apache.activemq.usage.TempUsage
tempUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
tempUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor
org.apache.activemq.usage.TempUsage(java.lang.String,org.apache.activemq.store.kahadb.plist.PListStore).parameterNames = name store
org.apache.activemq.usage.TempUsage(org.apache.activemq.usage.TempUsage,java.lang.String).parameterNames = parent name
timeStampingBrokerPlugin = org.apache.activemq.broker.util.TimeStampingBrokerPlugin
timedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.TimedSubscriptionRecoveryPolicy
topic = org.apache.activemq.command.ActiveMQTopic
org.apache.activemq.command.ActiveMQTopic(java.lang.String).parameterNames = name
traceBrokerPathPlugin = org.apache.activemq.broker.util.TraceBrokerPathPlugin
transact-database-locker = org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker
transact-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.TransactJDBCAdapter
transportConnector = org.apache.activemq.broker.TransportConnector
org.apache.activemq.broker.TransportConnector(org.apache.activemq.transport.TransportServer).parameterNames = server
udpTraceBrokerPlugin = org.apache.activemq.broker.util.UDPTraceBrokerPlugin
uniquePropertyMessageEvictionStrategy = org.apache.activemq.broker.region.policy.UniquePropertyMessageEvictionStrategy
usageCapacity = org.apache.activemq.usage.UsageCapacity
virtualDestinationInterceptor = org.apache.activemq.broker.region.virtual.VirtualDestinationInterceptor
virtualSelectorCacheBrokerPlugin = org.apache.activemq.plugin.SubQueueSelectorCacheBrokerPlugin
virtualTopic = org.apache.activemq.broker.region.virtual.VirtualTopic
vmCursor = org.apache.activemq.broker.region.policy.VMPendingSubscriberMessageStoragePolicy
vmDurableCursor = org.apache.activemq.broker.region.policy.VMPendingDurableSubscriberMessageStoragePolicy
vmQueueCursor = org.apache.activemq.broker.region.policy.VMPendingQueueMessageStoragePolicy
xaConnectionFactory = org.apache.activemq.spring.ActiveMQXAConnectionFactory
xaConnectionFactory.initMethod = afterPropertiesSet

View File

@@ -0,0 +1,3 @@
#Generated by xbean-spring
#Tue Sep 25 10:20:04 EDT 2012
http\://activemq.apache.org/schema/core=org.apache.xbean.spring.context.v2.XBeanNamespaceHandler

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

Binary file not shown.

(new image, 11 KiB)

activemq-leveldb/pom.xml (new file, 434 lines)
View File

@@ -0,0 +1,434 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-parent</artifactId>
<version>5.7-SNAPSHOT</version>
</parent>
<artifactId>activemq-leveldb</artifactId>
<packaging>jar</packaging>
<name>ActiveMQ :: LevelDB</name>
<description>ActiveMQ LevelDB based store</description>
<dependencies>
<!-- for scala support -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala-version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-core</artifactId>
<version>5.7-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.fusesource.hawtbuf</groupId>
<artifactId>hawtbuf-proto</artifactId>
<version>${hawtbuf-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.hawtdispatch</groupId>
<artifactId>hawtdispatch-scala</artifactId>
<version>${hawtdispatch-version}</version>
</dependency>
<dependency>
<groupId>org.iq80.leveldb</groupId>
<artifactId>leveldb</artifactId>
<version>0.2</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-osx</artifactId>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-linux32</artifactId>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-linux64</artifactId>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-win32</artifactId>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-win64</artifactId>
<version>1.3</version>
</dependency>
<!-- For Optional Snappy Compression -->
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.iq80.snappy</groupId>
<artifactId>snappy</artifactId>
<version>0.2</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
<version>${jackson-version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<version>${jackson-version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop-version}</version>
<exclusions>
<!-- hadoop's transitive dependencies are such a pig -->
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math</artifactId>
</exclusion>
<exclusion>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Testing Dependencies -->
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-core</artifactId>
<version>5.7-SNAPSHOT</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-console</artifactId>
<version>5.7-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<!-- Hadoop Testing Deps -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop-version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.6</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<version>6.1.26</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>6.1.26</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
<version>5.5.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
<version>5.5.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
<version>6.1.14</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>6.1.14</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math</artifactId>
<version>2.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_2.9.1</artifactId>
<version>${scalatest-version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.scala-tools</groupId>
<artifactId>maven-scala-plugin</artifactId>
<version>${scala-plugin-version}</version>
<executions>
<execution>
<id>compile</id>
<goals><goal>compile</goal> </goals>
<phase>compile</phase>
</execution>
<execution>
<id>test-compile</id>
<goals>
<goal>testCompile</goal>
</goals>
<phase>test-compile</phase>
</execution>
<execution>
<phase>process-resources</phase>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
<configuration>
<jvmArgs>
<jvmArg>-Xmx1024m</jvmArg>
<jvmArg>-Xss8m</jvmArg>
</jvmArgs>
<scalaVersion>${scala-version}</scalaVersion>
<args>
<arg>-deprecation</arg>
</args>
<compilerPlugins>
<compilerPlugin>
<groupId>org.fusesource.jvmassert</groupId>
<artifactId>jvmassert</artifactId>
<version>1.1</version>
</compilerPlugin>
</compilerPlugins>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<!-- we must turn off the use of system class loader so our tests can find stuff - otherwise ScalaSupport compiler can't find stuff -->
<useSystemClassLoader>false</useSystemClassLoader>
<!--forkMode>pertest</forkMode-->
<childDelegation>false</childDelegation>
<useFile>true</useFile>
<failIfNoTests>false</failIfNoTests>
</configuration>
</plugin>
<plugin>
<groupId>org.fusesource.hawtbuf</groupId>
<artifactId>hawtbuf-protoc</artifactId>
<version>${hawtbuf-version}</version>
<configuration>
<type>alt</type>
</configuration>
<executions>
<execution>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.fusesource.mvnplugins</groupId>
<artifactId>maven-uberize-plugin</artifactId>
<version>1.14</version>
<executions>
<execution>
<id>all</id>
<phase>package</phase>
<goals><goal>uberize</goal></goals>
</execution>
</executions>
<configuration>
<uberArtifactAttached>true</uberArtifactAttached>
<uberClassifierName>uber</uberClassifierName>
<artifactSet>
<includes>
<include>org.scala-lang:scala-library</include>
<include>org.fusesource.hawtdispatch:hawtdispatch</include>
<include>org.fusesource.hawtdispatch:hawtdispatch-scala</include>
<include>org.fusesource.hawtbuf:hawtbuf</include>
<include>org.fusesource.hawtbuf:hawtbuf-proto</include>
<include>org.iq80.leveldb:leveldb-api</include>
<!--
<include>org.iq80.leveldb:leveldb</include>
<include>org.xerial.snappy:snappy-java</include>
<include>com.google.guava:guava</include>
-->
<include>org.xerial.snappy:snappy-java</include>
<include>org.fusesource.leveldbjni:leveldbjni</include>
<include>org.fusesource.leveldbjni:leveldbjni-osx</include>
<include>org.fusesource.leveldbjni:leveldbjni-linux32</include>
<include>org.fusesource.leveldbjni:leveldbjni-linux64</include>
<include>org.fusesource.hawtjni:hawtjni-runtime</include>
<!-- include bits need to access hdfs as a client -->
<include>org.apache.hadoop:hadoop-core</include>
<include>commons-configuration:commons-configuration</include>
<include>org.codehaus.jackson:jackson-mapper-asl</include>
<include>org.codehaus.jackson:jackson-core-asl</include>
</includes>
</artifactSet>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
<configuration>
<classifier>bundle</classifier>
<excludeDependencies />
<instructions>
<Bundle-SymbolicName>${project.groupId}.${project.artifactId}</Bundle-SymbolicName>
<Fragment-Host>org.apache.activemq.activemq-core</Fragment-Host>
<Export-Package>
org.apache.activemq.leveldb*;version=${project.version};-noimport:=;-split-package:=merge-last,
</Export-Package>
<Embed-Dependency>*;inline=**;artifactId=
hawtjni-runtime|hawtbuf|hawtbuf-proto|hawtdispatch|hawtdispatch-scala|scala-library|
leveldb-api|leveldbjni|leveldbjni-osx|leveldbjni-linux32|leveldbjni-linux64|
hadoop-core|commons-configuration|jackson-mapper-asl|jackson-core-asl|commons-lang</Embed-Dependency>
<Embed-Transitive>true</Embed-Transitive>
<Import-Package>*;resolution:=optional</Import-Package>
</instructions>
</configuration>
<executions>
<execution>
<id>bundle</id>
<phase>package</phase>
<goals>
<goal>bundle</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkMode>always</forkMode>
<excludes>
<exclude>**/EnqueueRateScenariosTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,95 @@
# The LevelDB Store
## Overview
The LevelDB Store is a message store implementation that can be used in ActiveMQ messaging servers.
## LevelDB vs KahaDB
How is the LevelDB Store better than the default KahaDB store:
* It maintains fewer index entries per message than KahaDB, which means it has a higher persistent throughput.
* Faster recovery when a broker restarts
* Since the broker tends to write and read queue entries sequentially, the LevelDB based index provides much better performance than the B-Tree based indexes of KahaDB, which increases throughput.
* Unlike the KahaDB indexes, the LevelDB indexes support concurrent read access which further improves read throughput.
* Pauseless data log file garbage collection cycles.
* It uses fewer read IO operations to load stored messages.
* If a message is copied to multiple queues (typically happens if you're using virtual topics with multiple
consumers), then LevelDB will only journal the payload of the message once. KahaDB will journal it multiple times.
* It exposes its status via JMX for monitoring
* Supports replication to get High Availability
See the following chart to get an idea of how much better you can expect the LevelDB store to perform vs. the KahaDB store:
![kahadb-vs-leveldb.png ](https://raw.github.com/fusesource/fuse-extra/master/fusemq-leveldb/kahadb-vs-leveldb.png)
## How to Use with ActiveMQ
Update the broker configuration file and change the `persistenceAdapter` element
settings so that it uses the LevelDB store, as in the following Spring XML
configuration example:
<persistenceAdapter>
<levelDB directory="${activemq.base}/data/leveldb" logSize="107374182"/>
</persistenceAdapter>
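For context, a minimal sketch of where that fragment sits in activemq.xml (the surrounding `broker` element and its `brokerName`/`dataDirectory` attributes are standard ActiveMQ settings, shown here only for illustration):

    <broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.base}/data">
      <persistenceAdapter>
        <levelDB directory="${activemq.base}/data/leveldb" logSize="107374182"/>
      </persistenceAdapter>
    </broker>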
### Configuration / Property Reference
*TODO*
### JMX Attribute and Operation Reference
*TODO*
## Known Limitations
* XA Transactions not supported yet
* The store does not do any duplicate detection of messages.
## Built in High Availability Support
You can also use a High Availability (HA) version of the LevelDB store which
works with Hadoop based file systems to achieve HA of your stored messages.
**Q:** What are the requirements?
**A:** An existing Hadoop 1.0.0 cluster
**Q:** How does it work during the normal operating cycle?
**A:** It uses HDFS to store a highly available copy of the local leveldb storage files. As local log files are being written to, it also maintains a mirror copy on HDFS. If you have sync enabled on the store, an HDFS file sync is performed instead of a local disk sync. When the index is checkpointed, we upload any not-yet-uploaded leveldb .sst files to HDFS.
**Q:** What happens when a broker fails and we start up a new slave to take over?
**A:** The slave will download from HDFS the log files and the .sst files associated with the latest uploaded index. Then normal leveldb store recovery kicks in, which updates the index using the log files.
**Q:** How do I use the HA version of the LevelDB store?
**A:** Update your activemq.xml to use a `persistenceAdapter` setting similar to the following:
<persistenceAdapter>
<bean xmlns="http://www.springframework.org/schema/beans"
class="org.apache.activemq.leveldb.HALevelDBStore">
<!-- File system URL to replicate to -->
<property name="dfsUrl" value="hdfs://hadoop-name-node"/>
<!-- Directory in the file system to store the data in -->
<property name="dfsDirectory" value="activemq"/>
<property name="directory" value="${activemq.base}/data/leveldb"/>
<property name="logSize" value="107374182"/>
<!-- <property name="sync" value="false"/> -->
</bean>
</persistenceAdapter>
Notice the implementation class name changes to 'HALevelDBStore'.
Instead of using a 'dfsUrl' property, you can also just load an existing Hadoop configuration file if it's available on your system, for example:
<property name="dfsConfig" value="/opt/hadoop-1.0.0/conf/core-site.xml"/>
**Q:** Who handles starting up the Slave?
**A:** You do. :) This implementation assumes master startup/elections are performed externally and that 2 brokers are never running against the same HDFS file path. In practice this means you need something like ZooKeeper to control starting new brokers to take over failed masters.
**Q:** Can this run against something other than HDFS?
**A:** It should be able to run with any Hadoop supported file system like CloudStore, S3, MapR, NFS, etc. (well, at least in theory; I've only tested against HDFS).
**Q:** Can 'X' performance be optimized?
**A:** There are a bunch of ways to improve the performance of many of the things the current version of the store is doing: for example, aggregating the .sst files into an archive to make more efficient use of HDFS, concurrent downloading to improve recovery performance, lazy downloading of the oldest log files to make recovery faster, async HDFS writes to avoid blocking local updates, and running brokers in a warm 'standby' mode which keeps downloading new log updates and applying index updates from the master as they get uploaded to HDFS, for faster failovers.
**Q:** Does the broker fail if HDFS fails?
**A:** Currently, yes. But it should be possible to make the master resilient to HDFS failures.

View File

@@ -16,7 +16,7 @@
*/
package org.apache.activemq.store.leveldb;
import org.fusesource.mq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.LevelDBStore;
/**

View File

@@ -0,0 +1,56 @@
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.apache.activemq.leveldb.record;
option java_multiple_files = true;
//
// We create a collection record for each
// transaction, queue, topic.
//
message CollectionKey {
required int64 key = 1;
}
message CollectionRecord {
optional int64 key = 1;
optional int32 type = 2;
optional bytes meta = 3 [java_override_type = "Buffer"];
}
//
// We create a entry record for each message, subscription,
// and subscription position.
//
message EntryKey {
required int64 collection_key = 1;
required bytes entry_key = 2 [java_override_type = "Buffer"];
}
message EntryRecord {
optional int64 collection_key = 1;
optional bytes entry_key = 2 [java_override_type = "Buffer"];
optional int64 value_location = 3;
optional int32 value_length = 4;
optional bytes value = 5 [java_override_type = "Buffer"];
optional bytes meta = 6 [java_override_type = "Buffer"];
}
message SubscriptionRecord {
optional int64 topic_key = 1;
optional string client_id = 2;
optional string subscription_name = 3;
optional string selector = 4;
optional string destination_name = 5;
}

View File

@@ -0,0 +1,139 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq
import java.nio.ByteBuffer
import org.fusesource.hawtbuf.Buffer
import org.xerial.snappy.{Snappy => Xerial}
import org.iq80.snappy.{Snappy => Iq80}
/**
* <p>
* A Snappy abstraction which attempts to use the iq80 implementation and falls back
* to the xerial Snappy implementation if it cannot be loaded. You can change the
* load order by setting the 'leveldb.snappy' system property. Example:
*
* <code>
* -Dleveldb.snappy=xerial,iq80
* </code>
*
* The system property can also be configured with the name of a class which
* implements the Snappy.SPI interface.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
package object leveldb {
final val Snappy = {
var attempt:SnappyTrait = null
System.getProperty("leveldb.snappy", "iq80,xerial").split(",").foreach { x =>
if( attempt==null ) {
try {
var name = x.trim();
name = name.toLowerCase match {
case "xerial" => "org.apache.activemq.leveldb.XerialSnappy"
case "iq80" => "org.apache.activemq.leveldb.IQ80Snappy"
case _ => name
}
attempt = Thread.currentThread().getContextClassLoader().loadClass(name).newInstance().asInstanceOf[SnappyTrait];
attempt.compress("test")
} catch {
case x =>
attempt = null
}
}
}
attempt
}
trait SnappyTrait {
def uncompressed_length(input: Buffer):Int
def uncompress(input: Buffer, output:Buffer): Int
def max_compressed_length(length: Int): Int
def compress(input: Buffer, output: Buffer): Int
def compress(input: Buffer):Buffer = {
val compressed = new Buffer(max_compressed_length(input.length))
compressed.length = compress(input, compressed)
compressed
}
def compress(text: String): Buffer = {
val uncompressed = new Buffer(text.getBytes("UTF-8"))
val compressed = new Buffer(max_compressed_length(uncompressed.length))
compressed.length = compress(uncompressed, compressed)
return compressed
}
def uncompress(input: Buffer):Buffer = {
val uncompressed = new Buffer(uncompressed_length(input))
uncompressed.length = uncompress(input, uncompressed)
uncompressed
}
def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer): Int = {
val input = if (compressed.hasArray) {
new Buffer(compressed.array, compressed.arrayOffset + compressed.position, compressed.remaining)
} else {
val t = new Buffer(compressed.remaining)
compressed.mark
compressed.get(t.data)
compressed.reset
t
}
val output = if (uncompressed.hasArray) {
new Buffer(uncompressed.array, uncompressed.arrayOffset + uncompressed.position, uncompressed.capacity()-uncompressed.position)
} else {
new Buffer(uncompressed_length(input))
}
output.length = uncompress(input, output)
if (uncompressed.hasArray) {
uncompressed.limit(uncompressed.position + output.length)
} else {
val p = uncompressed.position
uncompressed.limit(uncompressed.capacity)
uncompressed.put(output.data, output.offset, output.length)
uncompressed.flip.position(p)
}
return output.length
}
}
}
package leveldb {
class XerialSnappy extends SnappyTrait {
override def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer) = Xerial.uncompress(compressed, uncompressed)
def uncompressed_length(input: Buffer) = Xerial.uncompressedLength(input.data, input.offset, input.length)
def uncompress(input: Buffer, output: Buffer) = Xerial.uncompress(input.data, input.offset, input.length, output.data, output.offset)
def max_compressed_length(length: Int) = Xerial.maxCompressedLength(length)
def compress(input: Buffer, output: Buffer) = Xerial.compress(input.data, input.offset, input.length, output.data, output.offset)
override def compress(text: String) = new Buffer(Xerial.compress(text))
}
class IQ80Snappy extends SnappyTrait {
def uncompressed_length(input: Buffer) = Iq80.getUncompressedLength(input.data, input.offset)
def uncompress(input: Buffer, output: Buffer): Int = Iq80.uncompress(input.data, input.offset, input.length, output.data, output.offset)
def compress(input: Buffer, output: Buffer): Int = Iq80.compress(input.data, input.offset, input.length, output.data, output.offset)
def max_compressed_length(length: Int) = Iq80.maxCompressedLength(length)
}
}

View File

@@ -0,0 +1,735 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.fusesource.hawtdispatch._
import org.fusesource.hawtdispatch.BaseRetained
import java.util.concurrent._
import atomic._
import org.fusesource.hawtbuf.Buffer
import org.apache.activemq.store.MessageRecoveryListener
import java.lang.ref.WeakReference
import scala.Option._
import org.fusesource.hawtbuf.Buffer._
import org.apache.activemq.command._
import org.apache.activemq.leveldb.record.{SubscriptionRecord, CollectionRecord}
import util.TimeMetric
import java.util.HashMap
import collection.mutable.{HashSet, ListBuffer}
import org.apache.activemq.thread.DefaultThreadPools
case class MessageRecord(id:MessageId, data:Buffer, syncNeeded:Boolean) {
var locator:(Long, Int) = _
}
case class QueueEntryRecord(id:MessageId, queueKey:Long, queueSeq:Long)
case class QueueRecord(id:ActiveMQDestination, queue_key:Long)
case class QueueEntryRange()
case class SubAckRecord(subKey:Long, ackPosition:Long)
sealed trait UowState {
def stage:Int
}
// UoW is initially open.
object UowOpen extends UowState {
override def stage = 0
override def toString = "UowOpen"
}
// UoW is Committed once the broker has finished creating it.
object UowClosed extends UowState {
override def stage = 1
override def toString = "UowClosed"
}
// UOW is delayed until we send it to get flushed.
object UowDelayed extends UowState {
override def stage = 2
override def toString = "UowDelayed"
}
object UowFlushQueued extends UowState {
override def stage = 3
override def toString = "UowFlushQueued"
}
object UowFlushing extends UowState {
override def stage = 4
override def toString = "UowFlushing"
}
// Then it moves on to be flushed. Flushed just
// means the message has been written to disk
// and out of memory
object UowFlushed extends UowState {
override def stage = 5
override def toString = "UowFlushed"
}
// Once completed then you know it has been synced to disk.
object UowCompleted extends UowState {
override def stage = 6
override def toString = "UowCompleted"
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
case class CountDownFuture(completed:CountDownLatch=new CountDownLatch(1)) extends java.util.concurrent.Future[Object] {
def countDown = completed.countDown()
def cancel(mayInterruptIfRunning: Boolean) = false
def isCancelled = false
def get() = {
completed.await()
null
}
def get(p1: Long, p2: TimeUnit) = {
if(completed.await(p1, p2)) {
null
} else {
throw new TimeoutException
}
}
def isDone = completed.await(0, TimeUnit.SECONDS);
}
object UowManagerConstants {
val QUEUE_COLLECTION_TYPE = 1
val TOPIC_COLLECTION_TYPE = 2
val TRANSACTION_COLLECTION_TYPE = 3
val SUBSCRIPTION_COLLECTION_TYPE = 4
case class QueueEntryKey(queue:Long, seq:Long)
def key(x:QueueEntryRecord) = QueueEntryKey(x.queueKey, x.queueSeq)
}
import UowManagerConstants._
class DelayableUOW(val manager:DBManager) extends BaseRetained {
val countDownFuture = CountDownFuture()
var canceled = false;
val uowId:Int = manager.lastUowId.incrementAndGet()
var actions = Map[MessageId, MessageAction]()
var subAcks = ListBuffer[SubAckRecord]()
var completed = false
var disableDelay = false
var delayableActions = 0
private var _state:UowState = UowOpen
def state = this._state
def state_=(next:UowState) {
assert(this._state.stage < next.stage)
this._state = next
}
def syncNeeded = actions.find( _._2.syncNeeded ).isDefined
def size = 100+actions.foldLeft(0L){ case (sum, entry) =>
sum + (entry._2.size+100)
} + (subAcks.size * 100)
class MessageAction {
var id:MessageId = _
var messageRecord: MessageRecord = null
var enqueues = ListBuffer[QueueEntryRecord]()
var dequeues = ListBuffer[QueueEntryRecord]()
def uow = DelayableUOW.this
def isEmpty() = messageRecord==null && enqueues==Nil && dequeues==Nil
def cancel() = {
uow.rm(id)
}
def syncNeeded = messageRecord!=null && messageRecord.syncNeeded
def size = (if(messageRecord!=null) messageRecord.data.length+20 else 0) + ((enqueues.size+dequeues.size)*50)
def addToPendingStore() = {
var set = manager.pendingStores.get(id)
if(set==null) {
set = HashSet()
manager.pendingStores.put(id, set)
}
set.add(this)
}
def removeFromPendingStore() = {
var set = manager.pendingStores.get(id)
if(set!=null) {
set.remove(this)
if(set.isEmpty) {
manager.pendingStores.remove(id)
}
}
}
}
def completeAsap() = this.synchronized { disableDelay=true }
def delayable = !disableDelay && delayableActions>0 && manager.flushDelay>=0
def rm(msg:MessageId) = {
actions -= msg
if( actions.isEmpty && state.stage < UowFlushing.stage ) {
cancel
}
}
def cancel = {
manager.dispatchQueue.assertExecuting()
manager.uowCanceledCounter += 1
canceled = true
manager.flush_queue.remove(uowId)
onCompleted
}
def getAction(id:MessageId) = {
actions.get(id) match {
case Some(x) => x
case None =>
val x = new MessageAction
x.id = id
actions += id->x
x
}
}
def updateAckPosition(sub:DurableSubscription) = {
subAcks += SubAckRecord(sub.subKey, sub.lastAckPosition)
}
def enqueue(queueKey:Long, queueSeq:Long, message:Message, delay_enqueue:Boolean) = {
var delay = delay_enqueue && message.getTransactionId==null
if(delay ) {
manager.uowEnqueueDelayReqested += 1
} else {
manager.uowEnqueueNodelayReqested += 1
}
val id = message.getMessageId
val messageRecord = id.getDataLocator match {
case null =>
var packet = manager.parent.wireFormat.marshal(message)
var data = new Buffer(packet.data, packet.offset, packet.length)
if( manager.snappyCompressLogs ) {
data = Snappy.compress(data)
}
val record = MessageRecord(id, data, message.isResponseRequired)
id.setDataLocator(record)
record
case record:MessageRecord =>
record
case x:(Long, Int) =>
null
}
val entry = QueueEntryRecord(id, queueKey, queueSeq)
assert(id.getEntryLocator == null)
id.setEntryLocator((queueKey, queueSeq))
val a = this.synchronized {
if( !delay )
disableDelay = true
val action = getAction(entry.id)
action.messageRecord = messageRecord
action.enqueues += entry
delayableActions += 1
action
}
manager.dispatchQueue {
manager.cancelable_enqueue_actions.put(key(entry), a)
a.addToPendingStore()
}
countDownFuture
}
def dequeue(queueKey:Long, id:MessageId) = {
val (queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[(Long, Long)];
val entry = QueueEntryRecord(id, queueKey, queueSeq)
this.synchronized {
getAction(id).dequeues += entry
}
countDownFuture
}
def complete_asap = this.synchronized {
disableDelay=true
if( state eq UowDelayed ) {
manager.enqueueFlush(this)
}
}
var complete_listeners = ListBuffer[()=>Unit]()
def addCompleteListener(func: =>Unit) = {
complete_listeners.append( func _ )
}
var asyncCapacityUsed = 0L
var disposed_at = 0L
override def dispose = this.synchronized {
state = UowClosed
disposed_at = System.nanoTime()
if( !syncNeeded ) {
val s = size
if( manager.asyncCapacityRemaining.addAndGet(-s) > 0 ) {
asyncCapacityUsed = s
countDownFuture.countDown
DefaultThreadPools.getDefaultTaskRunnerFactory.execute(^{
complete_listeners.foreach(_())
})
} else {
manager.asyncCapacityRemaining.addAndGet(s)
}
}
// closeSource.merge(this)
manager.dispatchQueue {
manager.processClosed(this)
}
}
def onCompleted() = this.synchronized {
if ( state.stage < UowCompleted.stage ) {
state = UowCompleted
if( asyncCapacityUsed != 0 ) {
manager.asyncCapacityRemaining.addAndGet(asyncCapacityUsed)
asyncCapacityUsed = 0
} else {
manager.uow_complete_latency.add(System.nanoTime() - disposed_at)
countDownFuture.countDown
DefaultThreadPools.getDefaultTaskRunnerFactory.execute(^{
complete_listeners.foreach(_())
})
}
for( (id, action) <- actions ) {
if( !action.enqueues.isEmpty ) {
action.removeFromPendingStore()
}
for( queueEntry <- action.enqueues ) {
manager.cancelable_enqueue_actions.remove(key(queueEntry))
}
}
super.dispose
}
}
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DBManager(val parent:LevelDBStore) {
var lastCollectionKey = new AtomicLong(0)
val client:LevelDBClient = parent.createClient
def writeExecutor = client.writeExecutor
def flushDelay = parent.flushDelay
val dispatchQueue = createQueue(toString)
// val aggregator = new AggregatingExecutor(dispatchQueue)
val asyncCapacityRemaining = new AtomicLong(0L)
def createUow() = new DelayableUOW(this)
var uowEnqueueDelayReqested = 0L
var uowEnqueueNodelayReqested = 0L
var uowClosedCounter = 0L
var uowCanceledCounter = 0L
var uowStoringCounter = 0L
var uowStoredCounter = 0L
val uow_complete_latency = TimeMetric()
// val closeSource = createSource(new ListEventAggregator[DelayableUOW](), dispatchQueue)
// closeSource.setEventHandler(^{
// closeSource.getData.foreach { uow =>
// processClosed(uow)
// }
// });
// closeSource.resume
var pendingStores = new ConcurrentHashMap[MessageId, HashSet[DelayableUOW#MessageAction]]()
var cancelable_enqueue_actions = new HashMap[QueueEntryKey, DelayableUOW#MessageAction]()
val lastUowId = new AtomicInteger(1)
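// Runs on the dispatch queue when a UOW is disposed: it cancels out enqueue/dequeue
// pairs that negate each other and then either schedules a delayed flush (if the UOW
// is still delayable) or queues it for storage immediately.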
def processClosed(uow:DelayableUOW) = {
dispatchQueue.assertExecuting()
uowClosedCounter += 1
// The broker could issue a flush_message call before
// this stage runs, which makes the stage jump over UowDelayed
if( uow.state.stage < UowDelayed.stage ) {
uow.state = UowDelayed
}
if( uow.state.stage < UowFlushing.stage ) {
uow.actions.foreach { case (id, action) =>
// The UoW may have been canceled.
if( action.messageRecord!=null && action.enqueues.isEmpty ) {
action.removeFromPendingStore()
action.messageRecord = null
uow.delayableActions -= 1
}
if( action.isEmpty ) {
action.cancel()
}
// dequeues can cancel out previous enqueues
action.dequeues.foreach { entry=>
val entry_key = key(entry)
val prev_action:DelayableUOW#MessageAction = cancelable_enqueue_actions.remove(entry_key)
if( prev_action!=null ) {
val prev_uow = prev_action.uow
prev_uow.synchronized {
if( !prev_uow.canceled ) {
prev_uow.delayableActions -= 1
// yay we can cancel out a previous enqueue
prev_action.enqueues = prev_action.enqueues.filterNot( x=> key(x) == entry_key )
if( prev_uow.state.stage >= UowDelayed.stage ) {
// if the message is not in any queues.. we can gc it..
if( prev_action.enqueues == Nil && prev_action.messageRecord !=null ) {
prev_action.removeFromPendingStore()
prev_action.messageRecord = null
prev_uow.delayableActions -= 1
}
// Cancel the action if it's now empty
if( prev_action.isEmpty ) {
prev_action.cancel()
} else if( !prev_uow.delayable ) {
// flush it if there is no point in delaying anymore
prev_uow.complete_asap
}
}
}
}
// since we canceled out the previous enqueue.. now cancel out the action
action.dequeues = action.dequeues.filterNot( _ == entry)
if( action.isEmpty ) {
action.cancel()
}
}
}
}
}
if( !uow.canceled && uow.state.stage < UowFlushQueued.stage ) {
if( uow.delayable ) {
// Let the uow get GCed if it's canceled during the delay window..
val ref = new WeakReference[DelayableUOW](uow)
scheduleFlush(ref)
} else {
enqueueFlush(uow)
}
}
}
private def scheduleFlush(ref: WeakReference[DelayableUOW]) {
dispatchQueue.executeAfter(flushDelay, TimeUnit.MILLISECONDS, ^ {
val uow = ref.get();
if (uow != null) {
enqueueFlush(uow)
}
})
}
val flush_queue = new java.util.LinkedHashMap[Long, DelayableUOW]()
def enqueueFlush(uow:DelayableUOW) = {
dispatchQueue.assertExecuting()
if( uow!=null && !uow.canceled && uow.state.stage < UowFlushQueued.stage ) {
uow.state = UowFlushQueued
flush_queue.put (uow.uowId, uow)
flushSource.merge(1)
}
}
val flushSource = createSource(EventAggregators.INTEGER_ADD, dispatchQueue)
flushSource.setEventHandler(^{drainFlushes});
flushSource.resume
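// Drains all queued UOWs into a single batched client.store() call. The flush source
// stays suspended while the write executor persists the batch, so further flush
// requests simply accumulate until the batch completes.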
def drainFlushes:Unit = {
dispatchQueue.assertExecuting()
if( !started ) {
return
}
// Some UOWs may have been canceled.
import collection.JavaConversions._
val values = flush_queue.values().toSeq.toArray
flush_queue.clear()
val uows = values.flatMap { uow=>
if( uow.canceled ) {
None
} else {
// It will not be possible to cancel the UOW anymore..
uow.state = UowFlushing
uow.actions.foreach { case (_, action) =>
action.enqueues.foreach { queue_entry=>
val action = cancelable_enqueue_actions.remove(key(queue_entry))
assert(action!=null)
}
}
Some(uow)
}
}
if( !uows.isEmpty ) {
uowStoringCounter += uows.size
flushSource.suspend
writeExecutor {
client.store(uows)
flushSource.resume
dispatchQueue {
uowStoredCounter += uows.size
uows.foreach { uow=>
uow.onCompleted
}
}
}
}
}
var started = false
def snappyCompressLogs = parent.snappyCompressLogs
def start = {
asyncCapacityRemaining.set(parent.asyncBufferSize)
client.start()
dispatchQueue.sync {
started = true
pollGc
if(parent.monitorStats) {
monitorStats
}
}
}
def stop() = {
dispatchQueue.sync {
started = false
}
client.stop()
}
def pollGc:Unit = dispatchQueue.after(10, TimeUnit.SECONDS) {
if( started ) {
val positions = parent.getTopicGCPositions
writeExecutor {
if( started ) {
client.gc(positions)
pollGc
}
}
}
}
def monitorStats:Unit = dispatchQueue.after(1, TimeUnit.SECONDS) {
if( started ) {
println(("committed: %d, canceled: %d, storing: %d, stored: %d, " +
"uow complete: %,.3f ms, " +
"index write: %,.3f ms, " +
"log write: %,.3f ms, log flush: %,.3f ms, log rotate: %,.3f ms"+
"add msg: %,.3f ms, add enqueue: %,.3f ms, " +
"uowEnqueueDelayReqested: %d, uowEnqueueNodelayReqested: %d "
).format(
uowClosedCounter, uowCanceledCounter, uowStoringCounter, uowStoredCounter,
uow_complete_latency.reset,
client.max_index_write_latency.reset,
client.log.max_log_write_latency.reset, client.log.max_log_flush_latency.reset, client.log.max_log_rotate_latency.reset,
client.max_write_message_latency.reset, client.max_write_enqueue_latency.reset,
uowEnqueueDelayReqested, uowEnqueueNodelayReqested
))
uowClosedCounter = 0
// uowCanceledCounter = 0
uowStoringCounter = 0
uowStoredCounter = 0
monitorStats
}
}
/////////////////////////////////////////////////////////////////////
//
// Implementation of the Store interface
//
/////////////////////////////////////////////////////////////////////
def checkpoint(sync:Boolean) = writeExecutor.sync {
client.snapshotIndex(sync)
}
def purge = writeExecutor.sync {
client.purge
lastCollectionKey.set(1)
}
def getLastQueueEntrySeq(key:Long) = {
client.getLastQueueEntrySeq(key)
}
def collectionEmpty(key:Long) = writeExecutor.sync {
client.collectionEmpty(key)
}
def collectionSize(key:Long) = {
client.collectionSize(key)
}
def collectionIsEmpty(key:Long) = {
client.collectionIsEmpty(key)
}
def cursorMessages(key:Long, listener:MessageRecoveryListener, startPos:Long) = {
var nextPos = startPos;
client.queueCursor(key, nextPos) { msg =>
if( listener.hasSpace ) {
listener.recoverMessage(msg)
nextPos += 1
true
} else {
false
}
}
nextPos
}
def queuePosition(id: MessageId):Long = {
id.getEntryLocator.asInstanceOf[(Long, Long)]._2
}
def createQueueStore(dest:ActiveMQQueue):parent.LevelDBMessageStore = {
parent.createQueueMessageStore(dest, createStore(dest, QUEUE_COLLECTION_TYPE))
}
def destroyQueueStore(key:Long) = writeExecutor.sync {
client.removeCollection(key)
}
def getLogAppendPosition = writeExecutor.sync {
client.getLogAppendPosition
}
def addSubscription(topic_key:Long, info:SubscriptionInfo):DurableSubscription = {
val record = new SubscriptionRecord.Bean
record.setTopicKey(topic_key)
record.setClientId(info.getClientId)
record.setSubscriptionName(info.getSubcriptionName)
if( info.getSelector!=null ) {
record.setSelector(info.getSelector)
}
if( info.getDestination!=null ) {
record.setDestinationName(info.getDestination.getQualifiedName)
}
val collection = new CollectionRecord.Bean()
collection.setType(SUBSCRIPTION_COLLECTION_TYPE)
collection.setKey(lastCollectionKey.incrementAndGet())
collection.setMeta(record.freeze().toUnframedBuffer)
val buffer = collection.freeze()
buffer.toFramedBuffer // eager encode the record.
writeExecutor.sync {
client.addCollection(buffer)
}
DurableSubscription(collection.getKey, topic_key, info)
}
def removeSubscription(sub:DurableSubscription) = {
client.removeCollection(sub.subKey)
}
def createTopicStore(dest:ActiveMQTopic) = {
var key = createStore(dest, TOPIC_COLLECTION_TYPE)
parent.createTopicMessageStore(dest, key)
}
def createStore(destination:ActiveMQDestination, collectionType:Int) = {
val collection = new CollectionRecord.Bean()
collection.setType(collectionType)
collection.setMeta(utf8(destination.getQualifiedName))
collection.setKey(lastCollectionKey.incrementAndGet())
val buffer = collection.freeze()
buffer.toFramedBuffer // eager encode the record.
writeExecutor.sync {
client.addCollection(buffer)
}
collection.getKey
}
def loadCollections = {
val collections = writeExecutor.sync {
client.listCollections
}
var last = 0L
collections.foreach { case (key, record) =>
last = key
record.getType match {
case QUEUE_COLLECTION_TYPE =>
val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.QUEUE_TYPE).asInstanceOf[ActiveMQQueue]
parent.createQueueMessageStore(dest, key)
case TOPIC_COLLECTION_TYPE =>
val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.TOPIC_TYPE).asInstanceOf[ActiveMQTopic]
parent.createTopicMessageStore(dest, key)
case SUBSCRIPTION_COLLECTION_TYPE =>
val sr = SubscriptionRecord.FACTORY.parseUnframed(record.getMeta)
val info = new SubscriptionInfo
info.setClientId(sr.getClientId)
info.setSubcriptionName(sr.getSubscriptionName)
if( sr.hasSelector ) {
info.setSelector(sr.getSelector)
}
if(sr.hasDestinationName) {
info.setSubscribedDestination(ActiveMQDestination.createDestination(sr.getDestinationName, ActiveMQDestination.TOPIC_TYPE))
}
var sub = DurableSubscription(key, sr.getTopicKey, info)
sub.lastAckPosition = client.getAckPosition(key);
parent.createSubscription(sub)
case _ =>
}
}
lastCollectionKey.set(last)
}
def getMessage(x: MessageId):Message = {
val id = Option(pendingStores.get(x)).flatMap(_.headOption).map(_.id).getOrElse(x)
val locator = id.getDataLocator()
val msg = client.getMessage(locator)
msg.setMessageId(id)
msg
}
}

@@ -0,0 +1,398 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.leveldb.util._
import org.fusesource.leveldbjni.internal.Util
import FileSupport._
import org.codehaus.jackson.map.ObjectMapper
import java.io._
import scala.collection.mutable._
import scala.collection.immutable.TreeMap
import org.fusesource.hawtbuf.{ByteArrayOutputStream, Buffer}
import org.apache.hadoop.fs.{FileSystem, Path}
/**
 * Small helper around a shared Jackson ObjectMapper for encoding and decoding
 * JSON DTOs (such as IndexManifestDTO) to and from hawtbuf Buffers.
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
object JsonCodec {
final val mapper: ObjectMapper = new ObjectMapper
def decode[T](buffer: Buffer, clazz: Class[T]): T = {
val original = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
try {
return mapper.readValue(buffer.in, clazz)
} finally {
Thread.currentThread.setContextClassLoader(original)
}
}
def decode[T](is: InputStream, clazz : Class[T]): T = {
var original: ClassLoader = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
try {
return JsonCodec.mapper.readValue(is, clazz)
}
finally {
Thread.currentThread.setContextClassLoader(original)
}
}
def encode(value: AnyRef): Buffer = {
var baos = new ByteArrayOutputStream
mapper.writeValue(baos, value)
return baos.toBuffer
}
}
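// Illustrative only (not part of this commit): round-tripping a DTO through JsonCodec,
// using the IndexManifestDTO defined later in this change set. File names are made up.
//
//   val mf = new IndexManifestDTO
//   mf.snapshot_id = 42L
//   mf.current_manifest = "MANIFEST-000042"
//   mf.files.add("000042.sst")
//   val buf = JsonCodec.encode(mf)
//   val copy = JsonCodec.decode(buf, classOf[IndexManifestDTO])
//   assert(copy.snapshot_id == 42L)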
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object HALevelDBClient extends Log {
val MANIFEST_SUFFIX = ".mf"
val LOG_SUFFIX = LevelDBClient.LOG_SUFFIX
val INDEX_SUFFIX = LevelDBClient.INDEX_SUFFIX
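// Replicated files are named after the log/index position they start at, encoded as a
// 16 digit hex number plus a suffix; find_sequence_status maps those names back to
// their numeric positions.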
def create_sequence_path(directory:Path, id:Long, suffix:String) = new Path(directory, ("%016x%s".format(id, suffix)))
def find_sequence_status(fs:FileSystem, directory:Path, suffix:String) = {
TreeMap((fs.listStatus(directory).flatMap { f =>
val name = f.getPath.getName
if( name.endsWith(suffix) ) {
try {
val base = name.stripSuffix(suffix)
val position = java.lang.Long.parseLong(base, 16);
Some(position -> f )
} catch {
case e:NumberFormatException => None
}
} else {
None
}
}): _* )
}
}
/**
 * A LevelDBClient that mirrors its log files and index snapshots to a Hadoop
 * FileSystem so that a new master can download them and take over the store.
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
class HALevelDBClient(val store:HALevelDBStore) extends LevelDBClient(store) {
import HALevelDBClient._
case class Snapshot(current_manifest:String, files:Set[String])
var snapshots = TreeMap[Long, Snapshot]()
// Eventually we will allow warm standby slaves to add references to old
// snapshots so that we don't delete them while they are in the process
// of downloading the snapshot.
var snapshotRefCounters = HashMap[Long, LongCounter]()
var indexFileRefCounters = HashMap[String, LongCounter]()
def dfs = store.dfs
def dfsDirectory = new Path(store.dfsDirectory)
def dfsBlockSize = store.dfsBlockSize
def dfsReplication = store.dfsReplication
def remoteIndexPath = new Path(dfsDirectory, "index")
override def start() = {
retry {
directory.mkdirs()
dfs.mkdirs(dfsDirectory)
downloadLogFiles
dfs.mkdirs(remoteIndexPath)
downloadIndexFiles
}
super.start()
storeTrace("Master takeover by: "+store.containerId, true)
}
override def locked_purge = {
super.locked_purge
dfs.delete(dfsDirectory, true)
}
override def snapshotIndex(sync: Boolean) = {
val previous_snapshot = lastIndexSnapshotPos
super.snapshotIndex(sync)
// upload the snapshot to the dfs
uploadIndexFiles(lastIndexSnapshotPos)
// Drop the previous snapshot reference..
for( counter <- snapshotRefCounters.get(previous_snapshot)) {
if( counter.decrementAndGet() <= 0 ) {
snapshotRefCounters.remove(previous_snapshot)
}
}
gcSnapshotRefs
}
// downloads missing log files...
def downloadLogFiles {
val log_files = find_sequence_status(dfs, dfsDirectory, LOG_SUFFIX)
val downloads = log_files.flatMap( _ match {
case (id, status) =>
val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
// is it missing or does the size not match?
if (!target.exists() || target.length() != status.getLen) {
Some((id, status))
} else {
None
}
})
if( !downloads.isEmpty ) {
val total_size = downloads.foldLeft(0L)((a,x)=> a+x._2.getLen)
downloads.foreach {
case (id, status) =>
val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
// is it missing or does the size not match?
if (!target.exists() || target.length() != status.getLen) {
info("Downloading log file: "+status.getPath.getName)
using(dfs.open(status.getPath, 32*1024)) { is=>
using(new FileOutputStream(target)) { os=>
copy(is, os)
}
}
}
}
}
}
// See if there is a more recent index that can be downloaded.
def downloadIndexFiles {
snapshots = TreeMap()
dfs.listStatus(remoteIndexPath).foreach { status =>
val name = status.getPath.getName
indexFileRefCounters.put(name, new LongCounter())
if( name endsWith MANIFEST_SUFFIX ) {
info("Getting index snapshot manifest: "+status.getPath.getName)
val mf = using(dfs.open(status.getPath)) { is =>
JsonCodec.decode(is, classOf[IndexManifestDTO])
}
import collection.JavaConversions._
snapshots += mf.snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
}
}
// Check for invalid snapshots..
for( (snapshotid, snapshot) <- snapshots) {
val matches = indexFileRefCounters.keySet & snapshot.files
if( matches.size != snapshot.files.size ) {
var path = create_sequence_path(remoteIndexPath, snapshotid, MANIFEST_SUFFIX)
warn("Deleting inconsistent snapshot manifest: "+path.getName)
dfs.delete(path, true)
snapshots -= snapshotid
}
}
// Add a ref to the last snapshot..
for( (snapshotid, _) <- snapshots.lastOption ) {
snapshotRefCounters.getOrElseUpdate(snapshotid, new LongCounter()).incrementAndGet()
}
// Increment index file refs..
for( key <- snapshotRefCounters.keys; snapshot <- snapshots.get(key); file <- snapshot.files ) {
indexFileRefCounters.getOrElseUpdate(file, new LongCounter()).incrementAndGet()
}
// Remove un-referenced index files.
for( (name, counter) <- indexFileRefCounters ) {
if( counter.get() <= 0 ) {
var path = new Path(remoteIndexPath, name)
info("Deleting unreferenced index file: "+path.getName)
dfs.delete(path, true)
indexFileRefCounters.remove(name)
}
}
val local_snapshots = Map(LevelDBClient.find_sequence_files(directory, INDEX_SUFFIX).values.flatten { dir =>
if( dir.isDirectory ) dir.listFiles() else Array[File]()
}.map(x=> (x.getName, x)).toSeq:_*)
for( (id, snapshot) <- snapshots.lastOption ) {
// increment the ref..
tempIndexFile.recursiveDelete
tempIndexFile.mkdirs
for( file <- snapshot.files ; if !file.endsWith(MANIFEST_SUFFIX) ) {
val target = tempIndexFile / file
// The file might be in a local snapshot already..
local_snapshots.get(file) match {
case Some(f) =>
// had it locally.. link it.
Util.link(f, target)
case None =>
// download..
var path = new Path(remoteIndexPath, file)
info("Downloading index file: "+path)
using(dfs.open(path, 32*1024)) { is=>
using(new FileOutputStream(target)) { os=>
copy(is, os)
}
}
}
}
val current = tempIndexFile / "CURRENT"
current.writeText(snapshot.current_manifest)
// We got everything ok, now rename.
tempIndexFile.renameTo(LevelDBClient.create_sequence_file(directory, id, INDEX_SUFFIX))
}
gcSnapshotRefs
}
def gcSnapshotRefs = {
snapshots = snapshots.filter { case (id, snapshot)=>
if (snapshotRefCounters.get(id).isDefined) {
true
} else {
for( file <- snapshot.files ) {
for( counter <- indexFileRefCounters.get(file) ) {
if( counter.decrementAndGet() <= 0 ) {
var path = new Path(remoteIndexPath, file)
info("Deleteing unreferenced index file: %s", path.getName)
dfs.delete(path, true)
indexFileRefCounters.remove(file)
}
}
}
false
}
}
}
def uploadIndexFiles(snapshot_id:Long):Unit = {
val source = LevelDBClient.create_sequence_file(directory, snapshot_id, INDEX_SUFFIX)
try {
// Build the new manifest..
val mf = new IndexManifestDTO
mf.snapshot_id = snapshot_id
mf.current_manifest = (source / "CURRENT").readText()
source.listFiles.foreach { file =>
val name = file.getName
if( name !="LOCK" && name !="CURRENT") {
mf.files.add(name)
}
}
import collection.JavaConversions._
mf.files.foreach { file =>
val refs = indexFileRefCounters.getOrElseUpdate(file, new LongCounter())
if(refs.get()==0) {
// Upload if not yet on the remote.
val target = new Path(remoteIndexPath, file)
using(new FileInputStream(source / file)) { is=>
using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
copy(is, os)
}
}
}
refs.incrementAndGet()
}
val target = create_sequence_path(remoteIndexPath, mf.snapshot_id, MANIFEST_SUFFIX)
mf.files.add(target.getName)
indexFileRefCounters.getOrElseUpdate(target.getName, new LongCounter()).incrementAndGet()
using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
JsonCodec.mapper.writeValue(os, mf)
}
snapshots += snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
snapshotRefCounters.getOrElseUpdate(snapshot_id, new LongCounter()).incrementAndGet()
} catch {
case e: Exception =>
warn(e, "Could not upload the index: " + e)
}
}
// Override the log appender implementation so that it
// stores the logs on the local and remote file systems.
override def createLog = new RecordLog(directory, LOG_SUFFIX) {
override protected def onDelete(file: File) = {
super.onDelete(file)
// also delete the file on the dfs.
dfs.delete(new Path(dfsDirectory, file.getName), false)
}
override def create_log_appender(position: Long) = {
new LogAppender(next_log(position), position) {
val dfs_path = new Path(dfsDirectory, file.getName)
debug("Opening DFS log file for append: "+dfs_path.getName)
val dfs_os = dfs.create(dfs_path, true, RecordLog.BUFFER_SIZE, dfsReplication.toShort, dfsBlockSize )
debug("Opened")
override def flush = this.synchronized {
if( write_buffer.position() > 0 ) {
var buffer: Buffer = write_buffer.toBuffer
// Write it to DFS..
buffer.writeTo(dfs_os.asInstanceOf[OutputStream]);
// Now write it to the local FS.
val byte_buffer = buffer.toByteBuffer
val pos = append_offset-byte_buffer.remaining
flushed_offset.addAndGet(byte_buffer.remaining)
channel.write(byte_buffer, pos)
if( byte_buffer.hasRemaining ) {
throw new IOException("Short write")
}
write_buffer.reset()
}
}
override def force = {
dfs_os.sync()
}
override def dispose() = {
try {
super.dispose()
} finally {
dfs_os.close()
}
}
}
}
}
}

@@ -0,0 +1,74 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.hadoop.conf.Configuration
import org.apache.activemq.util.ServiceStopper
import org.apache.hadoop.fs.FileSystem
import scala.reflect.BeanProperty
import java.net.InetAddress
/**
 * <p>
 * A LevelDBStore variant that replicates its data to a Hadoop FileSystem (DFS)
 * so that a warm standby broker can take over the persistent state.
 * </p>
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
class HALevelDBStore extends LevelDBStore {
@BeanProperty
var dfsUrl:String = _
@BeanProperty
var dfsConfig:String = _
@BeanProperty
var dfsDirectory:String = _
@BeanProperty
var dfsBlockSize = 1024*1024*50L
@BeanProperty
var dfsReplication = 1
@BeanProperty
var containerId:String = _
var dfs:FileSystem = _
override def doStart = {
if(dfs==null) {
Thread.currentThread().setContextClassLoader(getClass.getClassLoader)
val config = new Configuration()
config.set("fs.hdfs.impl.disable.cache", "true")
config.set("fs.file.impl.disable.cache", "true")
Option(dfsConfig).foreach(config.addResource(_))
Option(dfsUrl).foreach(config.set("fs.default.name", _))
dfsUrl = config.get("fs.default.name")
dfs = FileSystem.get(config)
}
if ( containerId==null ) {
containerId = InetAddress.getLocalHost.getHostName
}
super.doStart
}
override def doStop(stopper: ServiceStopper): Unit = {
super.doStop(stopper)
if(dfs!=null){
dfs.close()
}
}
override def createClient = new HALevelDBClient(this)
}

@@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.HashSet;
import java.util.Set;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="index_files")
@XmlAccessorType(XmlAccessType.FIELD)
public class IndexManifestDTO {
@XmlAttribute(name = "snapshot_id")
public long snapshot_id;
@XmlAttribute(name = "current_manifest")
public String current_manifest;
@XmlAttribute(name = "file")
public Set<String> files = new HashSet<String>();
}

File diff suppressed because it is too large.

@@ -0,0 +1,622 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.broker.BrokerServiceAware
import org.apache.activemq.broker.ConnectionContext
import org.apache.activemq.command._
import org.apache.activemq.openwire.OpenWireFormat
import org.apache.activemq.usage.SystemUsage
import java.io.File
import java.io.IOException
import java.util.concurrent.ExecutionException
import java.util.concurrent.Future
import java.util.concurrent.atomic.AtomicLong
import reflect.BeanProperty
import org.apache.activemq.store._
import java.util._
import scala.collection.mutable.ListBuffer
import javax.management.ObjectName
import org.apache.activemq.broker.jmx.AnnotatedMBean
import org.apache.activemq.util._
import org.apache.kahadb.util.LockFile
import org.apache.activemq.leveldb.util.{RetrySupport, FileSupport, Log}
object LevelDBStore extends Log {
val DONE = new CountDownFuture();
DONE.countDown
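// A future that is already completed; returned when the real work is deferred, for
// example when an operation is recorded against a pending JMS transaction.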
def toIOException(e: Throwable): IOException = {
if (e.isInstanceOf[ExecutionException]) {
var cause: Throwable = (e.asInstanceOf[ExecutionException]).getCause
if (cause.isInstanceOf[IOException]) {
return cause.asInstanceOf[IOException]
}
}
if (e.isInstanceOf[IOException]) {
return e.asInstanceOf[IOException]
}
return IOExceptionSupport.create(e)
}
def waitOn(future: Future[AnyRef]): Unit = {
try {
future.get
}
catch {
case e: Throwable => {
throw toIOException(e)
}
}
}
}
case class DurableSubscription(subKey:Long, topicKey:Long, info: SubscriptionInfo) {
var lastAckPosition = 0L
var cursorPosition = 0L
}
class LevelDBStoreView(val store:LevelDBStore) extends LevelDBStoreViewMBean {
import store._
def getAsyncBufferSize = asyncBufferSize
def getIndexDirectory = directory.getCanonicalPath
def getLogDirectory = Option(logDirectory).getOrElse(directory).getCanonicalPath
def getIndexBlockRestartInterval = indexBlockRestartInterval
def getIndexBlockSize = indexBlockSize
def getIndexCacheSize = indexCacheSize
def getIndexCompression = indexCompression
def getIndexFactory = db.client.factory.getClass.getName
def getIndexMaxOpenFiles = indexMaxOpenFiles
def getIndexWriteBufferSize = indexWriteBufferSize
def getLogSize = logSize
def getParanoidChecks = paranoidChecks
def getSync = sync
def getVerifyChecksums = verifyChecksums
def getUowClosedCounter = db.uowClosedCounter
def getUowCanceledCounter = db.uowCanceledCounter
def getUowStoringCounter = db.uowStoringCounter
def getUowStoredCounter = db.uowStoredCounter
def getUowMaxCompleteLatency = db.uow_complete_latency.get
def getMaxIndexWriteLatency = db.client.max_index_write_latency.get
def getMaxLogWriteLatency = db.client.log.max_log_write_latency.get
def getMaxLogFlushLatency = db.client.log.max_log_flush_latency.get
def getMaxLogRotateLatency = db.client.log.max_log_rotate_latency.get
def resetUowMaxCompleteLatency = db.uow_complete_latency.reset
def resetMaxIndexWriteLatency = db.client.max_index_write_latency.reset
def resetMaxLogWriteLatency = db.client.log.max_log_write_latency.reset
def resetMaxLogFlushLatency = db.client.log.max_log_flush_latency.reset
def resetMaxLogRotateLatency = db.client.log.max_log_rotate_latency.reset
def getIndexStats = db.client.index.getProperty("leveldb.stats")
}
import LevelDBStore._
class LevelDBStore extends ServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore {
final val wireFormat = new OpenWireFormat
final val db = new DBManager(this)
@BeanProperty
var directory: File = null
@BeanProperty
var logDirectory: File = null
@BeanProperty
var logSize: Long = 1024 * 1024 * 100
@BeanProperty
var indexFactory: String = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory"
@BeanProperty
var sync: Boolean = true
@BeanProperty
var verifyChecksums: Boolean = false
@BeanProperty
var indexMaxOpenFiles: Int = 1000
@BeanProperty
var indexBlockRestartInterval: Int = 16
@BeanProperty
var paranoidChecks: Boolean = false
@BeanProperty
var indexWriteBufferSize: Int = 1024*1024*6
@BeanProperty
var indexBlockSize: Int = 4 * 1024
@BeanProperty
var indexCompression: String = "snappy"
@BeanProperty
var logCompression: String = "none"
@BeanProperty
var indexCacheSize: Long = 1024 * 1024 * 256L
@BeanProperty
var flushDelay = 1000*5
@BeanProperty
var asyncBufferSize = 1024*1024*4
@BeanProperty
var monitorStats = false
@BeanProperty
var failIfLocked = false
var purgeOnStatup: Boolean = false
var brokerService: BrokerService = null
val queues = collection.mutable.HashMap[ActiveMQQueue, LevelDBStore#LevelDBMessageStore]()
val topics = collection.mutable.HashMap[ActiveMQTopic, LevelDBStore#LevelDBTopicMessageStore]()
val topicsById = collection.mutable.HashMap[Long, LevelDBStore#LevelDBTopicMessageStore]()
override def toString: String = {
return "LevelDB:[" + directory.getAbsolutePath + "]"
}
def objectName = {
var brokerON = brokerService.getBrokerObjectName
val broker_name = brokerON.getKeyPropertyList().get("BrokerName")
new ObjectName(brokerON.getDomain() + ":" +
"BrokerName="+JMXSupport.encodeObjectNamePart(broker_name)+ "," +
"Type=LevelDBStore");
}
def retry[T](func : =>T):T = RetrySupport.retry(LevelDBStore, isStarted, func _)
var lock_file: LockFile = _
var snappyCompressLogs = false
def doStart: Unit = {
import FileSupport._
snappyCompressLogs = logCompression.toLowerCase == "snappy" && Snappy != null
debug("starting")
if ( lock_file==null ) {
lock_file = new LockFile(directory / "lock", true)
}
// Expose a JMX bean to expose the status of the store.
if(brokerService!=null){
try {
AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreView(this), objectName)
} catch {
case e: Throwable => {
warn(e, "LevelDB Store could not be registered in JMX: " + e.getMessage)
}
}
}
if (failIfLocked) {
lock_file.lock()
} else {
retry {
lock_file.lock()
}
}
if (purgeOnStatup) {
purgeOnStatup = false
db.client.locked_purge
info("Purged: "+this)
}
db.start
db.loadCollections
debug("started")
}
def doStop(stopper: ServiceStopper): Unit = {
db.stop
lock_file.unlock()
if(brokerService!=null){
brokerService.getManagementContext().unregisterMBean(objectName);
}
info("Stopped "+this)
}
def setBrokerService(brokerService: BrokerService): Unit = {
this.brokerService = brokerService
}
def setBrokerName(brokerName: String): Unit = {
}
def setUsageManager(usageManager: SystemUsage): Unit = {
}
def deleteAllMessages: Unit = {
purgeOnStatup = true
}
def getLastMessageBrokerSequenceId: Long = {
return 0
}
def createTransactionStore = this
val transactions = collection.mutable.HashMap[TransactionId, Transaction]()
trait TransactionAction {
def apply(uow:DelayableUOW):Unit
}
case class Transaction(id:TransactionId) {
val commitActions = ListBuffer[TransactionAction]()
def add(store:LevelDBMessageStore, message: Message, delay:Boolean) = {
commitActions += new TransactionAction() {
def apply(uow:DelayableUOW) = {
store.doAdd(uow, message, delay)
}
}
}
def remove(store:LevelDBMessageStore, msgid:MessageId) = {
commitActions += new TransactionAction() {
def apply(uow:DelayableUOW) = {
store.doRemove(uow, msgid)
}
}
}
def updateAckPosition(store:LevelDBTopicMessageStore, sub: DurableSubscription, position: Long) = {
commitActions += new TransactionAction() {
def apply(uow:DelayableUOW) = {
store.doUpdateAckPosition(uow, sub, position)
}
}
}
}
def transaction(txid: TransactionId) = transactions.getOrElseUpdate(txid, Transaction(txid))
def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = {
preCommit.run()
transactions.remove(txid) match {
case None=>
println("The transaction does not exist")
postCommit.run()
case Some(tx)=>
withUow { uow =>
for( action <- tx.commitActions ) {
action(uow)
}
uow.addCompleteListener( postCommit.run() )
}
}
}
def rollback(txid: TransactionId) = {
transactions.remove(txid) match {
case None=>
println("The transaction does not exist")
case Some(tx)=>
}
}
def prepare(tx: TransactionId) = {
sys.error("XA transactions not yet supported.")
}
def recover(listener: TransactionRecoveryListener) = {
}
def createQueueMessageStore(destination: ActiveMQQueue) = {
this.synchronized(queues.get(destination)).getOrElse(db.createQueueStore(destination))
}
def createQueueMessageStore(destination: ActiveMQQueue, key: Long):LevelDBMessageStore = {
var rc = new LevelDBMessageStore(destination, key)
this.synchronized {
queues.put(destination, rc)
}
rc
}
def removeQueueMessageStore(destination: ActiveMQQueue): Unit = this synchronized {
queues.remove(destination).foreach { store=>
db.destroyQueueStore(store.key)
}
}
def createTopicMessageStore(destination: ActiveMQTopic): TopicMessageStore = {
this.synchronized(topics.get(destination)).getOrElse(db.createTopicStore(destination))
}
def createTopicMessageStore(destination: ActiveMQTopic, key: Long):LevelDBTopicMessageStore = {
var rc = new LevelDBTopicMessageStore(destination, key)
this synchronized {
topics.put(destination, rc)
topicsById.put(key, rc)
}
rc
}
def removeTopicMessageStore(destination: ActiveMQTopic): Unit = {
topics.remove(destination).foreach { store=>
store.subscriptions.values.foreach { sub =>
db.removeSubscription(sub)
}
store.subscriptions.clear()
db.destroyQueueStore(store.key)
}
}
def getLogAppendPosition = db.getLogAppendPosition
def getDestinations: Set[ActiveMQDestination] = {
import collection.JavaConversions._
var rc: HashSet[ActiveMQDestination] = new HashSet[ActiveMQDestination]
rc.addAll(topics.keys)
rc.addAll(queues.keys)
return rc
}
def getLastProducerSequenceId(id: ProducerId): Long = {
return -1
}
def size: Long = {
return 0
}
def checkpoint(sync: Boolean): Unit = db.checkpoint(sync)
def withUow[T](func:(DelayableUOW)=>T):T = {
val uow = db.createUow
try {
func(uow)
} finally {
uow.release()
}
}
private def subscriptionKey(clientId: String, subscriptionName: String): String = {
return clientId + ":" + subscriptionName
}
case class LevelDBMessageStore(dest: ActiveMQDestination, val key: Long) extends AbstractMessageStore(dest) {
protected val lastSeq: AtomicLong = new AtomicLong(0)
protected var cursorPosition: Long = 0
lastSeq.set(db.getLastQueueEntrySeq(key))
def doAdd(uow: DelayableUOW, message: Message, delay:Boolean): CountDownFuture = {
uow.enqueue(key, lastSeq.incrementAndGet, message, delay)
}
override def asyncAddQueueMessage(context: ConnectionContext, message: Message) = asyncAddQueueMessage(context, message, false)
override def asyncAddQueueMessage(context: ConnectionContext, message: Message, delay: Boolean): Future[AnyRef] = {
if( message.getTransactionId!=null ) {
transaction(message.getTransactionId).add(this, message, delay)
DONE
} else {
withUow { uow=>
doAdd(uow, message, delay)
}
}
}
override def addMessage(context: ConnectionContext, message: Message) = addMessage(context, message, false)
override def addMessage(context: ConnectionContext, message: Message, delay: Boolean): Unit = {
waitOn(asyncAddQueueMessage(context, message, delay))
}
def doRemove(uow: DelayableUOW, id: MessageId): CountDownFuture = {
uow.dequeue(key, id)
}
override def removeAsyncMessage(context: ConnectionContext, ack: MessageAck): Unit = {
if( ack.getTransactionId!=null ) {
transaction(ack.getTransactionId).remove(this, ack.getLastMessageId)
DONE
} else {
waitOn(withUow{uow=>
doRemove(uow, ack.getLastMessageId)
})
}
}
def removeMessage(context: ConnectionContext, ack: MessageAck): Unit = {
removeAsyncMessage(context, ack)
}
def getMessage(id: MessageId): Message = {
var message: Message = db.getMessage(id)
if (message == null) {
throw new IOException("Message id not found: " + id)
}
return message
}
def removeAllMessages(context: ConnectionContext): Unit = {
db.collectionEmpty(key)
cursorPosition = 0
}
def getMessageCount: Int = {
return db.collectionSize(key).toInt
}
override def isEmpty: Boolean = {
return db.collectionIsEmpty(key)
}
def recover(listener: MessageRecoveryListener): Unit = {
cursorPosition = db.cursorMessages(key, listener, 0)
}
def resetBatching: Unit = {
cursorPosition = 0
}
def recoverNextMessages(maxReturned: Int, listener: MessageRecoveryListener): Unit = {
cursorPosition = db.cursorMessages(key, LimitingRecoveryListener(maxReturned, listener), cursorPosition)
}
override def setBatch(id: MessageId): Unit = {
cursorPosition = db.queuePosition(id)
}
}
case class LimitingRecoveryListener(max: Int, listener: MessageRecoveryListener) extends MessageRecoveryListener {
private var recovered: Int = 0
def hasSpace = recovered < max && listener.hasSpace
def recoverMessage(message: Message) = {
recovered += 1;
listener.recoverMessage(message)
}
def recoverMessageReference(ref: MessageId) = {
recovered += 1;
listener.recoverMessageReference(ref)
}
def isDuplicate(ref: MessageId) = listener.isDuplicate(ref)
}
//
// This gets called when the store is first loading up; it restores
// the existing durable subs..
def createSubscription(sub:DurableSubscription) = {
this.synchronized(topicsById.get(sub.topicKey)) match {
case Some(topic) =>
topic.synchronized {
topic.subscriptions.put((sub.info.getClientId, sub.info.getSubcriptionName), sub)
}
case None =>
// Topic does not exist.. so kill the durable sub..
db.removeSubscription(sub)
}
}
def getTopicGCPositions = {
import collection.JavaConversions._
val topics = this.synchronized {
new ArrayList(topicsById.values())
}
topics.flatMap(_.gcPosition).toSeq
}
class LevelDBTopicMessageStore(dest: ActiveMQDestination, key: Long) extends LevelDBMessageStore(dest, key) with TopicMessageStore {
val subscriptions = collection.mutable.HashMap[(String, String), DurableSubscription]()
var firstSeq = 0L
def gcPosition:Option[(Long, Long)] = {
var pos = lastSeq.get()
subscriptions.synchronized {
subscriptions.values.foreach { sub =>
if( sub.lastAckPosition < pos ) {
pos = sub.lastAckPosition
}
}
if( firstSeq != pos+1) {
firstSeq = pos+1
Some(key, firstSeq)
} else {
None
}
}
}
def addSubsciption(info: SubscriptionInfo, retroactive: Boolean) = {
var sub = db.addSubscription(key, info)
subscriptions.synchronized {
subscriptions.put((info.getClientId, info.getSubcriptionName), sub)
}
sub.lastAckPosition = if (retroactive) 0 else lastSeq.get()
waitOn(withUow{ uow=>
uow.updateAckPosition(sub)
uow.countDownFuture
})
}
def getAllSubscriptions: Array[SubscriptionInfo] = subscriptions.synchronized {
subscriptions.values.map(_.info).toArray
}
def lookupSubscription(clientId: String, subscriptionName: String): SubscriptionInfo = subscriptions.synchronized {
subscriptions.get((clientId, subscriptionName)).map(_.info).getOrElse(null)
}
def deleteSubscription(clientId: String, subscriptionName: String): Unit = {
subscriptions.synchronized {
subscriptions.remove((clientId, subscriptionName))
}.foreach(db.removeSubscription(_))
}
private def lookup(clientId: String, subscriptionName: String): Option[DurableSubscription] = subscriptions.synchronized {
subscriptions.get((clientId, subscriptionName))
}
def doUpdateAckPosition(uow: DelayableUOW, sub: DurableSubscription, position: Long) = {
sub.lastAckPosition = position
uow.updateAckPosition(sub)
}
def acknowledge(context: ConnectionContext, clientId: String, subscriptionName: String, messageId: MessageId, ack: MessageAck): Unit = {
lookup(clientId, subscriptionName).foreach { sub =>
var position = db.queuePosition(messageId)
if( ack.getTransactionId!=null ) {
transaction(ack.getTransactionId).updateAckPosition(this, sub, position)
DONE
} else {
waitOn(withUow{ uow=>
doUpdateAckPosition(uow, sub, position)
uow.countDownFuture
})
}
}
}
def resetBatching(clientId: String, subscriptionName: String): Unit = {
lookup(clientId, subscriptionName).foreach { sub =>
sub.cursorPosition = 0
}
}
def recoverSubscription(clientId: String, subscriptionName: String, listener: MessageRecoveryListener): Unit = {
lookup(clientId, subscriptionName).foreach { sub =>
sub.cursorPosition = db.cursorMessages(key, listener, sub.cursorPosition.max(sub.lastAckPosition+1))
}
}
def recoverNextMessages(clientId: String, subscriptionName: String, maxReturned: Int, listener: MessageRecoveryListener): Unit = {
lookup(clientId, subscriptionName).foreach { sub =>
sub.cursorPosition = db.cursorMessages(key, LimitingRecoveryListener(maxReturned, listener), sub.cursorPosition.max(sub.lastAckPosition+1))
}
}
def getMessageCount(clientId: String, subscriptionName: String): Int = {
lookup(clientId, subscriptionName) match {
case Some(sub) => (lastSeq.get - sub.lastAckPosition).toInt
case None => 0
}
}
}
///////////////////////////////////////////////////////////////////////////
// The following methods actually have nothing to do with JMS txs... It's more like
// an operation batch.. we handle that in the DBManager though..
///////////////////////////////////////////////////////////////////////////
def beginTransaction(context: ConnectionContext): Unit = {}
def commitTransaction(context: ConnectionContext): Unit = {}
def rollbackTransaction(context: ConnectionContext): Unit = {}
def createClient = new LevelDBClient(this);
}
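For orientation, a minimal usage sketch (not part of this commit) showing how the store could be attached to a broker programmatically; it assumes only the standard BrokerService API and the setters generated from the @BeanProperty fields above:

import java.io.File
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.leveldb.LevelDBStore

object LevelDBStoreExample {
  def main(args: Array[String]): Unit = {
    val store = new LevelDBStore
    store.setDirectory(new File("target/activemq-data/leveldb")) // index + log directory
    store.setLogSize(100 * 1024 * 1024)                          // 100MB journal files
    val broker = new BrokerService
    broker.setPersistenceAdapter(store)  // LevelDBStore implements PersistenceAdapter
    broker.addConnector("tcp://localhost:61616")
    broker.start()
    // ... produce and consume messages ...
    broker.stop()
  }
}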

@@ -0,0 +1,107 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb;
import org.apache.activemq.broker.jmx.MBeanInfo;
import java.io.File;
/**
 * <p>
 * JMX view exposing the configuration and performance metrics of a LevelDBStore.
 * </p>
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
public interface LevelDBStoreViewMBean {
@MBeanInfo("The directory holding the store index data.")
String getIndexDirectory();
@MBeanInfo("The directory holding the store log data.")
String getLogDirectory();
@MBeanInfo("The size the log files are allowed to grow to.")
long getLogSize();
@MBeanInfo("The implementation of the LevelDB index being used.")
String getIndexFactory();
@MBeanInfo("Are writes synced to disk.")
boolean getSync();
@MBeanInfo("Is data verified against checksums as it's loaded back from disk.")
boolean getVerifyChecksums();
@MBeanInfo("The maximum number of open files the index will open at one time.")
int getIndexMaxOpenFiles();
@MBeanInfo("Number of keys between restart points for delta encoding of keys in the index")
int getIndexBlockRestartInterval();
@MBeanInfo("Do aggressive checking of store data")
boolean getParanoidChecks();
@MBeanInfo("Amount of data to build up in memory for the index before converting to a sorted on-disk file.")
int getIndexWriteBufferSize();
@MBeanInfo("Approximate size of user data packed per block for the index")
int getIndexBlockSize();
@MBeanInfo("The type of compression to use for the index")
String getIndexCompression();
@MBeanInfo("The size of the cache index")
long getIndexCacheSize();
@MBeanInfo("The maximum amount of async writes to buffer up")
int getAsyncBufferSize();
@MBeanInfo("The number of units of work which have been closed.")
long getUowClosedCounter();
@MBeanInfo("The number of units of work which have been canceled.")
long getUowCanceledCounter();
@MBeanInfo("The number of units of work which started getting stored.")
long getUowStoringCounter();
@MBeanInfo("The number of units of work which completed getting stored")
long getUowStoredCounter();
@MBeanInfo("Gets and resets the maximum time (in ms) a unit of work took to complete.")
double resetUowMaxCompleteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) an index write batch took to execute.")
double resetMaxIndexWriteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log write took to execute (includes the index write latency).")
double resetMaxLogWriteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log flush took to execute.")
double resetMaxLogFlushLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log rotation took to perform.")
double resetMaxLogRotateLatency();
@MBeanInfo("Gets the maximum time (in ms) a unit of work took to complete.")
double getUowMaxCompleteLatency();
@MBeanInfo("Gets the maximum time (in ms) an index write batch took to execute.")
double getMaxIndexWriteLatency();
@MBeanInfo("Gets the maximum time (in ms) a log write took to execute (includes the index write latency).")
double getMaxLogWriteLatency();
@MBeanInfo("Gets the maximum time (in ms) a log flush took to execute.")
double getMaxLogFlushLatency();
@MBeanInfo("Gets the maximum time (in ms) a log rotation took to perform.")
double getMaxLogRotateLatency();
@MBeanInfo("Gets the index statistics.")
String getIndexStats();
}

@@ -0,0 +1,518 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import java.{lang=>jl}
import java.{util=>ju}
import java.util.zip.CRC32
import java.util.Map.Entry
import java.util.concurrent.atomic.AtomicLong
import java.io._
import org.fusesource.hawtbuf.{DataByteArrayInputStream, DataByteArrayOutputStream, Buffer}
import org.fusesource.hawtdispatch.BaseRetained
import org.apache.activemq.leveldb.util.FileSupport._
import org.apache.activemq.util.LRUCache
import util.TimeMetric._
import util.{TimeMetric, Log}
import java.util.TreeMap
object RecordLog extends Log {
// The log files contain a sequence of variable length log records:
// record := header + data
//
// header :=
// '*' : int8 // Start of Record Magic
// kind : int8 // Help identify content type of the data.
// checksum : uint32 // crc32 of the data[]
// length : uint32 // the length of the data
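// The fixed header is therefore 1 (magic) + 1 (kind) + 4 (checksum) + 4 (length) = 10
// bytes, which is what the LOG_HEADER_SIZE constant below encodes.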
val LOG_HEADER_PREFIX = '*'.toByte
val UOW_END_RECORD = -1.toByte
val LOG_HEADER_SIZE = 10
val BUFFER_SIZE = 1024*512
val BYPASS_BUFFER_SIZE = 1024*16
case class LogInfo(file:File, position:Long, length:Long) {
def limit = position+length
}
def encode_long(a1:Long) = {
val out = new DataByteArrayOutputStream(8)
out.writeLong(a1)
out.toBuffer
}
def decode_long(value:Buffer):Long = {
val in = new DataByteArrayInputStream(value)
in.readLong()
}
}
case class RecordLog(directory: File, logSuffix:String) {
import RecordLog._
directory.mkdirs()
var logSize = 1024 * 1024 * 100L
var current_appender:LogAppender = _
var verify_checksums = false
var sync = false
val log_infos = new TreeMap[Long, LogInfo]()
object log_mutex
def delete(id:Long) = {
log_mutex.synchronized {
// We can't delete the current appender.
if( current_appender.position != id ) {
Option(log_infos.get(id)).foreach { info =>
onDelete(info.file)
log_infos.remove(id)
}
}
}
}
protected def onDelete(file:File) = {
file.delete()
}
def checksum(data: Buffer): Int = {
val checksum = new CRC32
checksum.update(data.data, data.offset, data.length)
(checksum.getValue & 0xFFFFFFFF).toInt
}
class LogAppender(file:File, position:Long) extends LogReader(file, position) {
val info = new LogInfo(file, position, 0)
override def open = new RandomAccessFile(file, "rw")
override def dispose() = {
force
super.dispose()
}
var append_offset = 0L
val flushed_offset = new AtomicLong(0)
def append_position = {
position+append_offset
}
// set the file size ahead of time so that we don't have to sync the file
// meta-data on every log sync.
channel.position(logSize-1)
channel.write(new Buffer(1).toByteBuffer)
channel.force(true)
if( sync ) {
channel.position(0)
}
val write_buffer = new DataByteArrayOutputStream(BUFFER_SIZE+LOG_HEADER_SIZE)
def force = {
flush
if(sync) {
max_log_flush_latency {
// only need to update the file metadata if the file size changes..
channel.force(append_offset > logSize)
}
}
}
/**
* returns the offset position of the data record.
*/
def append(id:Byte, data: Buffer) = this.synchronized {
val record_position = append_position
val data_length = data.length
val total_length = LOG_HEADER_SIZE + data_length
if( write_buffer.position() + total_length > BUFFER_SIZE ) {
flush
}
val cs: Int = checksum(data)
// trace("Writing at: "+record_position+" len: "+data_length+" with checksum: "+cs)
if( false && total_length > BYPASS_BUFFER_SIZE ) {
// Write the header and flush..
write_buffer.writeByte(LOG_HEADER_PREFIX)
write_buffer.writeByte(id)
write_buffer.writeInt(cs)
write_buffer.writeInt(data_length)
append_offset += LOG_HEADER_SIZE
flush
// Directly write the data to the channel since it's large.
val buffer = data.toByteBuffer
val pos = append_offset+LOG_HEADER_SIZE
val remaining = buffer.remaining
channel.write(buffer, pos)
flushed_offset.addAndGet(remaining)
if( buffer.hasRemaining ) {
throw new IOException("Short write")
}
append_offset += data_length
} else {
write_buffer.writeByte(LOG_HEADER_PREFIX)
write_buffer.writeByte(id)
write_buffer.writeInt(cs)
write_buffer.writeInt(data_length)
write_buffer.write(data.data, data.offset, data_length)
append_offset += total_length
}
(record_position, info)
}
def flush = max_log_flush_latency { this.synchronized {
if( write_buffer.position() > 0 ) {
val buffer = write_buffer.toBuffer.toByteBuffer
val remaining = buffer.remaining
val pos = append_offset-remaining
channel.write(buffer, pos)
flushed_offset.addAndGet(remaining)
if( buffer.hasRemaining ) {
throw new IOException("Short write")
}
write_buffer.reset()
} }
}
override def check_read_flush(end_offset:Long) = {
if( flushed_offset.get() < end_offset ) {
flush
}
}
}
case class LogReader(file:File, position:Long) extends BaseRetained {
def open = new RandomAccessFile(file, "r")
val fd = open
val channel = fd.getChannel
override def dispose() {
fd.close()
}
def check_read_flush(end_offset:Long) = {}
def read(record_position:Long, length:Int) = {
val offset = record_position-position
assert(offset >=0 )
check_read_flush(offset+LOG_HEADER_SIZE+length)
if(verify_checksums) {
val record = new Buffer(LOG_HEADER_SIZE+length)
def record_is_not_changing = {
using(open) { fd =>
val channel = fd.getChannel
val new_record = new Buffer(LOG_HEADER_SIZE+length)
channel.read(new_record.toByteBuffer, offset)
var same = record == new_record
println(same)
same
}
}
if( channel.read(record.toByteBuffer, offset) != record.length ) {
assert( record_is_not_changing )
throw new IOException("short record at position: "+record_position+" in file: "+file+", offset: "+offset)
}
val is = new DataByteArrayInputStream(record)
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
assert(record_is_not_changing)
throw new IOException("invalid record at position: "+record_position+" in file: "+file+", offset: "+offset)
}
val id = is.readByte()
val expectedChecksum = is.readInt()
val expectedLength = is.readInt()
val data = is.readBuffer(length)
// If you're reading the whole record we can verify the data checksum
if( expectedLength == length ) {
if( expectedChecksum != checksum(data) ) {
assert(record_is_not_changing)
throw new IOException("checksum does not match at position: "+record_position+" in file: "+file+", offset: "+offset)
}
}
data
} else {
val data = new Buffer(length)
if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != data.length ) {
throw new IOException("short record at position: "+record_position+" in file: "+file+", offset: "+offset)
}
data
}
}
def read(record_position:Long) = {
val offset = record_position-position
val header = new Buffer(LOG_HEADER_SIZE)
channel.read(header.toByteBuffer, offset)
val is = header.bigEndianEditor();
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
// Does not look like a record.
throw new IOException("invalid record position")
}
val id = is.readByte()
val expectedChecksum = is.readInt()
val length = is.readInt()
val data = new Buffer(length)
if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != length ) {
throw new IOException("short record")
}
if(verify_checksums) {
if( expectedChecksum != checksum(data) ) {
throw new IOException("checksum does not match")
}
}
(id, data, record_position+LOG_HEADER_SIZE+length)
}
def check(record_position:Long):Option[(Long, Option[Long])] = {
var offset = record_position-position
val header = new Buffer(LOG_HEADER_SIZE)
channel.read(header.toByteBuffer, offset)
val is = header.bigEndianEditor();
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
return None // Does not look like a record.
}
val kind = is.readByte()
val expectedChecksum = is.readInt()
val length = is.readInt()
val chunk = new Buffer(1024*4)
val chunkbb = chunk.toByteBuffer
offset += LOG_HEADER_SIZE
// Read the data in chunks to avoid
// OOME if we are checking an invalid record
// with a bad record length
val checksumer = new CRC32
var remaining = length
while( remaining > 0 ) {
val chunkSize = remaining.min(1024*4);
chunkbb.position(0)
chunkbb.limit(chunkSize)
channel.read(chunkbb, offset)
if( chunkbb.hasRemaining ) {
return None
}
checksumer.update(chunk.data, 0, chunkSize)
offset += chunkSize
remaining -= chunkSize
}
val checksum = ( checksumer.getValue & 0xFFFFFFFF).toInt
if( expectedChecksum != checksum ) {
return None
}
val uow_start_pos = if(kind == UOW_END_RECORD && length==8) Some(decode_long(chunk)) else None
return Some(record_position+LOG_HEADER_SIZE+length, uow_start_pos)
}
def verifyAndGetEndPosition:Long = {
var pos = position;
var current_uow_start = pos
val limit = position+channel.size()
while(pos < limit) {
check(pos) match {
case Some((next, uow_start_pos)) =>
uow_start_pos.foreach { uow_start_pos =>
if( uow_start_pos == current_uow_start ) {
current_uow_start = next
} else {
return current_uow_start
}
}
pos = next
case None =>
return current_uow_start
}
}
return current_uow_start
}
}
def create_log_appender(position: Long) = {
new LogAppender(next_log(position), position)
}
def create_appender(position: Long): Any = {
log_mutex.synchronized {
if(current_appender!=null) {
log_infos.put (position, new LogInfo(current_appender.file, current_appender.position, current_appender.append_offset))
}
current_appender = create_log_appender(position)
log_infos.put(position, new LogInfo(current_appender.file, position, 0))
}
}
val max_log_write_latency = TimeMetric()
val max_log_flush_latency = TimeMetric()
val max_log_rotate_latency = TimeMetric()
def open = {
log_mutex.synchronized {
log_infos.clear()
LevelDBClient.find_sequence_files(directory, logSuffix).foreach { case (position,file) =>
log_infos.put(position, LogInfo(file, position, file.length()))
}
val appendPos = if( log_infos.isEmpty ) {
0L
} else {
val file = log_infos.lastEntry().getValue
val r = LogReader(file.file, file.position)
try {
val actualLength = r.verifyAndGetEndPosition
val updated = file.copy(length = actualLength - file.position)
log_infos.put(updated.position, updated)
if( updated.file.length != file.length ) {
// we need to truncate.
using(new RandomAccessFile(file.file, "rw")) ( _.setLength(updated.length))
}
actualLength
} finally {
r.release()
}
}
create_appender(appendPos)
}
}
def close = {
log_mutex.synchronized {
current_appender.release
}
}
def appender_limit = current_appender.append_position
def appender_start = current_appender.position
def next_log(position:Long) = LevelDBClient.create_sequence_file(directory, position, logSuffix)
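// Runs func against the current appender and, if anything was appended, terminates the
// batch with a UOW_END_RECORD so that recovery only replays complete units of work.
// Once the current file grows past logSize the appender is rotated to a new log file.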
def appender[T](func: (LogAppender)=>T):T= {
val intial_position = current_appender.append_position
try {
max_log_write_latency {
val rc = func(current_appender)
if( current_appender.append_position != intial_position ) {
// Record a UOW_END_RECORD so that on recovery we only replay full units of work.
current_appender.append(UOW_END_RECORD,encode_long(intial_position))
}
rc
}
} finally {
current_appender.flush
max_log_rotate_latency {
log_mutex.synchronized {
if ( current_appender.append_offset >= logSize ) {
current_appender.release()
on_log_rotate()
create_appender(current_appender.append_position)
}
}
}
}
}
var on_log_rotate: ()=>Unit = ()=>{}
private val reader_cache = new LRUCache[File, LogReader](100) {
protected override def onCacheEviction(entry: Entry[File, LogReader]) = {
entry.getValue.release()
}
}
def log_info(pos:Long) = log_mutex.synchronized { Option(log_infos.floorEntry(pos)).map(_.getValue) }
private def get_reader[T](record_position:Long)(func: (LogReader)=>T) = {
val lookup = log_mutex.synchronized {
val info = log_info(record_position)
info.map { info=>
if(info.position == current_appender.position) {
current_appender.retain()
(info, current_appender)
} else {
(info, null)
}
}
}
lookup.map { case (info, appender) =>
val reader = if( appender!=null ) {
// read from the current appender.
appender
} else {
// Checkout a reader from the cache...
reader_cache.synchronized {
var reader = reader_cache.get(info.file)
if(reader==null) {
reader = LogReader(info.file, info.position)
reader_cache.put(info.file, reader)
}
reader.retain()
reader
}
}
try {
func(reader)
} finally {
reader.release
}
}
}
def read(pos:Long) = {
get_reader(pos)(_.read(pos))
}
def read(pos:Long, length:Int) = {
get_reader(pos)(_.read(pos, length))
}
}

View File

@@ -0,0 +1,296 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
import java.io._
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.LevelDBClient
import org.fusesource.leveldbjni.internal.Util
import org.apache.activemq.leveldb.util.ProcessSupport._
object FileSupport {
implicit def toRichFile(file:File):RichFile = new RichFile(file)
val onWindows = System.getProperty("os.name").toLowerCase().startsWith("windows")
private var linkStrategy = 0
private val LOG = Log(getClass)
def link(source:File, target:File):Unit = {
linkStrategy match {
case 0 =>
// We first try to link via a native system call. Fails if
// we cannot load the JNI module.
try {
Util.link(source, target)
} catch {
case e:IOException => throw e
case e:Throwable =>
// Fall back to a slower implementation.
LOG.debug("Native link system call not available")
linkStrategy = 5
link(source, target)
}
// TODO: consider implementing a case which does the native system call using JNA
case 5 =>
// Next we try to do the link by executing an
// operating system shell command
try {
if( onWindows ) {
system("fsutil", "hardlink", "create", target.getCanonicalPath, source.getCanonicalPath) match {
case(0, _, _) => // Success
case (_, out, err) =>
// TODO: we might want to look at the out/err to see why it failed
// to avoid falling back to the slower strategy.
LOG.debug("fsutil OS command not available either")
linkStrategy = 10
link(source, target)
}
} else {
system("ln", source.getCanonicalPath, target.getCanonicalPath) match {
case(0, _, _) => // Success
case (_, out, err) =>
// TODO: we might want to look at the out/err to see why it failed
// to avoid falling back to the slower strategy.
LOG.debug("ln OS command not available either")
linkStrategy = 2
link(source, target)
}
}
} catch {
case e:Throwable =>
}
case _ =>
// this final strategy is slow but sure to work.
source.copyTo(target)
}
}
def systemDir(name:String) = {
val baseValue = System.getProperty(name)
if( baseValue==null ) {
sys.error("The the %s system property is not set.".format(name))
}
val file = new File(baseValue)
if( !file.isDirectory ) {
sys.error("The the %s system property is not set to valid directory path %s".format(name, baseValue))
}
file
}
case class RichFile(self:File) {
def / (path:String) = new File(self, path)
def linkTo(target:File) = link(self, target)
def copyTo(target:File) = {
using(new FileOutputStream(target)){ os=>
using(new FileInputStream(self)){ is=>
FileSupport.copy(is, os)
}
}
}
def listFiles:Array[File] = {
Option(self.listFiles()).getOrElse(Array())
}
def recursiveList:List[File] = {
if( self.isDirectory ) {
self :: self.listFiles.toList.flatten( _.recursiveList )
} else {
self :: Nil
}
}
def recursiveDelete: Unit = {
if( self.exists ) {
if( self.isDirectory ) {
self.listFiles.foreach(_.recursiveDelete)
}
self.delete
}
}
def recursiveCopyTo(target: File) : Unit = {
if (self.isDirectory) {
target.mkdirs
self.listFiles.foreach( file=> file.recursiveCopyTo( target / file.getName) )
} else {
self.copyTo(target)
}
}
def readText(charset:String="UTF-8"): String = {
using(new FileInputStream(self)) { in =>
FileSupport.readText(in, charset)
}
}
def readBytes: Array[Byte] = {
using(new FileInputStream(self)) { in =>
FileSupport.readBytes(in)
}
}
def writeBytes(data:Array[Byte]):Unit = {
using(new FileOutputStream(self)) { out =>
FileSupport.writeBytes(out, data)
}
}
def writeText(data:String, charset:String="UTF-8"):Unit = {
using(new FileOutputStream(self)) { out =>
FileSupport.writeText(out, data, charset)
}
}
}
/**
* Returns the number of bytes copied.
*/
def copy(in: InputStream, out: OutputStream): Long = {
var bytesCopied: Long = 0
val buffer = new Array[Byte](8192)
var bytes = in.read(buffer)
while (bytes >= 0) {
out.write(buffer, 0, bytes)
bytesCopied += bytes
bytes = in.read(buffer)
}
bytesCopied
}
def using[R,C <: Closeable](closable: C)(proc: C=>R) = {
try {
proc(closable)
} finally {
try { closable.close } catch { case ignore => }
}
}
def readText(in: InputStream, charset:String="UTF-8"): String = {
new String(readBytes(in), charset)
}
def readBytes(in: InputStream): Array[Byte] = {
val out = new ByteArrayOutputStream()
copy(in, out)
out.toByteArray
}
def writeText(out: OutputStream, value: String, charset:String="UTF-8"): Unit = {
writeBytes(out, value.getBytes(charset))
}
def writeBytes(out: OutputStream, data: Array[Byte]): Unit = {
copy(new ByteArrayInputStream(data), out)
}
}
object ProcessSupport {
import FileSupport._
implicit def toRichProcessBuilder(self:ProcessBuilder):RichProcessBuilder = new RichProcessBuilder(self)
case class RichProcessBuilder(self:ProcessBuilder) {
def start(out:OutputStream=null, err:OutputStream=null, in:InputStream=null) = {
self.redirectErrorStream(out == err)
val process = self.start
if( in!=null ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getOutputStream) { out =>
FileSupport.copy(in, out)
}
} catch {
case _ =>
}
}
} else {
process.getOutputStream.close
}
if( out!=null ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getInputStream) { in =>
FileSupport.copy(in, out)
}
} catch {
case _ =>
}
}
} else {
process.getInputStream.close
}
if( err!=null && err!=out ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getErrorStream) { in =>
FileSupport.copy(in, err)
}
} catch {
case _ =>
}
}
} else {
process.getErrorStream.close
}
process
}
}
implicit def toRichProcess(self:Process):RichProcess = new RichProcess(self)
case class RichProcess(self:Process) {
def onExit(func: (Int)=>Unit) = LevelDBClient.THREAD_POOL {
self.waitFor
func(self.exitValue)
}
}
implicit def toProcessBuilder(args:Seq[String]):ProcessBuilder = new ProcessBuilder().command(args : _*)
def launch(command:String*)(func: (Int, Array[Byte], Array[Byte])=>Unit ):Unit = launch(command)(func)
def launch(p:ProcessBuilder, in:InputStream=null)(func: (Int, Array[Byte], Array[Byte]) => Unit):Unit = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
p.start(out, err, in).onExit { code=>
func(code, out.toByteArray, err.toByteArray)
}
}
def system(command:String*):(Int, Array[Byte], Array[Byte]) = system(command)
def system(p:ProcessBuilder, in:InputStream=null):(Int, Array[Byte], Array[Byte]) = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
val process = p.start(out, err, in)
process.waitFor
(process.exitValue, out.toByteArray, err.toByteArray)
}
}
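An illustrative sketch of how these helpers compose (the paths and command below are placeholders; the implicit RichFile and RichProcessBuilder conversions come from the two objects above):

import java.io.File
import org.apache.activemq.leveldb.util.FileSupport._
import org.apache.activemq.leveldb.util.ProcessSupport._

val source = new File("/tmp/source.dat")            // placeholder paths
val target = new File("/tmp/backup") / "copy.dat"   // RichFile's "/" builds a child path
source.copyTo(target)                               // stream copy, streams closed via using(...)
val text = target.readText()                        // whole file as a UTF-8 string
val (exit, out, err) = system("ln", "-s", "a", "b") // run a command, capture exit code and output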

View File

@@ -0,0 +1,180 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
import java.util.concurrent.atomic.AtomicLong
import org.slf4j.{MDC, Logger, LoggerFactory}
import java.lang.{Throwable, String}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object Log {
def apply(clazz:Class[_]):Log = apply(clazz.getName.stripSuffix("$"))
def apply(name:String):Log = new Log {
override val log = LoggerFactory.getLogger(name)
}
def apply(value:Logger):Log = new Log {
override val log = value
}
val exception_id_generator = new AtomicLong(System.currentTimeMillis)
def next_exception_id = exception_id_generator.incrementAndGet.toHexString
}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait Log {
import Log._
val log = LoggerFactory.getLogger(getClass.getName.stripSuffix("$"))
private def with_throwable(e:Throwable)(func: =>Unit) = {
if( e!=null ) {
val stack_ref = if( log.isDebugEnabled ) {
val id = next_exception_id
MDC.put("stackref", id.toString);
Some(id)
} else {
None
}
func
stack_ref.foreach { id=>
log.debug(e.toString, e)
MDC.remove("stackref")
}
} else {
func
}
}
private def format(message:String, args:Seq[Any]) = {
if( args.isEmpty ) {
message
} else {
message.format(args.map(_.asInstanceOf[AnyRef]) : _*)
}
}
def error(m: => String, args:Any*): Unit = {
if( log.isErrorEnabled ) {
log.error(format(m, args.toSeq))
}
}
def error(e: Throwable, m: => String, args:Any*): Unit = {
with_throwable(e) {
if( log.isErrorEnabled ) {
log.error(format(m, args.toSeq))
}
}
}
def error(e: Throwable): Unit = {
with_throwable(e) {
if( log.isErrorEnabled ) {
log.error(e.getMessage)
}
}
}
def warn(m: => String, args:Any*): Unit = {
if( log.isWarnEnabled ) {
log.warn(format(m, args.toSeq))
}
}
def warn(e: Throwable, m: => String, args:Any*): Unit = {
with_throwable(e) {
if( log.isWarnEnabled ) {
log.warn(format(m, args.toSeq))
}
}
}
def warn(e: Throwable): Unit = {
with_throwable(e) {
if( log.isWarnEnabled ) {
log.warn(e.toString)
}
}
}
def info(m: => String, args:Any*): Unit = {
if( log.isInfoEnabled ) {
log.info(format(m, args.toSeq))
}
}
def info(e: Throwable, m: => String, args:Any*): Unit = {
with_throwable(e) {
if( log.isInfoEnabled ) {
log.info(format(m, args.toSeq))
}
}
}
def info(e: Throwable): Unit = {
with_throwable(e) {
if( log.isInfoEnabled ) {
log.info(e.toString)
}
}
}
def debug(m: => String, args:Any*): Unit = {
if( log.isDebugEnabled ) {
log.debug(format(m, args.toSeq))
}
}
def debug(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isDebugEnabled ) {
log.debug(format(m, args.toSeq), e)
}
}
def debug(e: Throwable): Unit = {
if( log.isDebugEnabled ) {
log.debug(e.toString, e)
}
}
def trace(m: => String, args:Any*): Unit = {
if( log.isTraceEnabled ) {
log.trace(format(m, args.toSeq))
}
}
def trace(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isTraceEnabled ) {
log.trace(format(m, args.toSeq), e)
}
}
def trace(e: Throwable): Unit = {
if( log.isTraceEnabled ) {
log.trace(e.toString, e)
}
}
}
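An illustrative usage sketch (the object name is invented): a component mixes in the trait, and the by-name message plus varargs keep formatting cost away from disabled levels.

object ExampleComponent extends Log
import ExampleComponent._

info("processed %d records in %d ms", 42, 17)                             // formatted only when INFO is enabled
warn(new IllegalStateException("disk full"), "store %s degraded", "db-1") // throwable handled via with_throwable
debug(new RuntimeException("boom"))                                       // message plus stack trace at DEBUG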

View File

@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class LongCounter(private var value:Long = 0) extends Serializable {
def clear() = value=0
def get() = value
def set(value:Long) = this.value = value
def incrementAndGet() = addAndGet(1)
def decrementAndGet() = addAndGet(-1)
def addAndGet(amount:Long) = {
value+=amount
value
}
def getAndIncrement() = getAndAdd(1)
def getAndDecrement() = getAndAdd(-1)
def getAndAdd(amount:Long) = {
val rc = value
value+=amount
rc
}
override def toString() = get().toString
}
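For example (a single-threaded sketch; the class wraps a plain var, so callers provide their own synchronization):

val counter = new LongCounter()
counter.incrementAndGet()              // value is now 1
counter.addAndGet(10)                  // value is now 11
val before = counter.getAndDecrement() // returns 11, value drops to 10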

View File

@@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object RetrySupport {
def retry[T](log:Log, isStarted: ()=>Boolean, func: ()=>T): T = {
import log._
var error:Throwable = null
var rc:Option[T] = None
// We will loop until the tx succeeds. Perhaps it's
// failing due to a temporary condition like low disk space.
while(!rc.isDefined) {
try {
rc = Some(func())
} catch {
case e:Throwable =>
e.printStackTrace()
if( error==null ) {
warn(e, "DB operation failed. (entering recovery mode)")
}
error = e
}
if (!rc.isDefined) {
// We may need to give up if the store is being stopped.
if ( !isStarted() ) {
throw error
}
Thread.sleep(1000)
}
}
if( error!=null ) {
info("DB recovered from failure.")
}
rc.get
}
}
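A rough sketch of how retry is driven (the log object, started flag, and operation are placeholders):

object StoreLog extends Log                   // placeholder log target
var started = true                            // placeholder "store still running" flag
def riskyDbOperation(): Long = 42L            // placeholder operation that may fail transiently

val value = RetrySupport.retry(StoreLog, () => started, () => riskyDbOperation())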

View File

@@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
case class TimeMetric() {
var max = 0L
def add(duration:Long) = this.synchronized {
max = max.max(duration)
}
def get = {
this.synchronized {
max
} / 1000000.0
}
def reset = {
this.synchronized {
val rc = max
max = 0
rc
} / 1000000.0
}
def apply[T](func: =>T):T = {
val start = System.nanoTime()
try {
func
} finally {
add(System.nanoTime() - start)
}
}
}
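For example, wrapping a unit of work and then reading back the worst observed latency in milliseconds (the timed work here is a placeholder):

val write_latency = TimeMetric()
val rc = write_latency {
  Thread.sleep(5)                      // placeholder for the timed work
  "done"
}
println("max write latency: %.3f ms".format(write_latency.reset))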

View File

@@ -23,7 +23,7 @@ import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.BrokerTest;
import org.apache.activemq.store.kahadb.KahaDBStore;
import org.apache.activemq.util.IOHelper;
import org.fusesource.mq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.LevelDBStore;
/**
* Once the wire format is completed we can test against real persistence storage.

View File

@@ -0,0 +1,36 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# The logging properties used during tests..
#
log4j.rootLogger=WARN, console, file
log4j.logger.org.apache.activemq=INFO
log4j.logger.org.fusesource=INFO
# Console will only display warnings
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%-5p | %t | %m%n
log4j.appender.console.threshold=TRACE
# File appender will contain all info messages
log4j.appender.file=org.apache.log4j.FileAppender
log4j.appender.file.layout=org.apache.log4j.PatternLayout
log4j.appender.file.layout.ConversionPattern=%d | %-5p | %m | %c | %t%n
log4j.appender.file.file=target/test.log
log4j.appender.file.append=true

View File

@@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.spring.ActiveMQConnectionFactory
import javax.jms.{Destination, ConnectionFactory}
import org.apache.activemq.command.{ActiveMQTopic, ActiveMQQueue}
/**
* <p>
* ActiveMQ implementation of the JMS Scenario class.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ActiveMQScenario extends JMSClientScenario {
override protected def factory:ConnectionFactory = {
val rc = new ActiveMQConnectionFactory
rc.setBrokerURL(url)
rc
}
override protected def destination(i:Int):Destination = destination_type match {
case "queue" => new ActiveMQQueue(indexed_destination_name(i))
case "topic" => new ActiveMQTopic(indexed_destination_name(i))
case _ => error("Unsuported destination type: "+destination_type)
}
}

View File

@@ -0,0 +1,174 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import junit.framework.TestCase
import org.apache.activemq.broker._
import org.apache.activemq.store._
import java.io.File
import junit.framework.Assert._
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics
import region.policy.{PolicyEntry, PolicyMap}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class EnqueueRateScenariosTest extends TestCase {
var broker: BrokerService = null
override def setUp() {
import collection.JavaConversions._
broker = new BrokerService
broker.setDeleteAllMessagesOnStartup(true)
broker.setPersistenceAdapter(createStore)
broker.addConnector("tcp://0.0.0.0:0")
// val policies = new PolicyMap();
// val entry = new PolicyEntry
// entry.setQueue(">")
// policies.setPolicyEntries(List(entry))
// broker.setDestinationPolicy(policies)
broker.start
broker.waitUntilStarted()
}
override def tearDown() = {
if (broker != null) {
broker.stop
broker.waitUntilStopped
}
}
protected def canceledEnqueues() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowCanceledCounter
protected def enqueueOptimized() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueDelayReqested
protected def enqueueNotOptimized() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueNodelayReqested
protected def createStore: PersistenceAdapter = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/leveldb"))
return store
}
def collect_benchmark(scenario:ActiveMQScenario, warmup:Int, samples_count:Int) = {
val (cancels, optimized, unoptimized) = scenario.with_load {
println("Warming up for %d seconds...".format(warmup))
Thread.sleep(warmup*1000)
println("Sampling...")
scenario.collection_start
val cancelStart = canceledEnqueues
val enqueueOptimizedStart = enqueueOptimized
val enqueueNotOptimizedStart = enqueueNotOptimized
for (i <- 0 until samples_count) {
Thread.sleep(1000);
scenario.collection_sample
}
(canceledEnqueues-cancelStart, enqueueOptimized-enqueueOptimizedStart, enqueueNotOptimized-enqueueNotOptimizedStart)
}
println("Done.")
var samples = scenario.collection_end
val error_rates = samples.get("e_custom").get.map(_._2)
assertFalse("Errors occured during scenario run: "+error_rates, error_rates.find(_ > 0 ).isDefined )
val producer_stats = new DescriptiveStatistics();
for( producer_rates <- samples.get("p_custom") ) {
for( i <- producer_rates ) {
producer_stats.addValue(i._2)
}
}
val consumer_stats = new DescriptiveStatistics();
for( consumer_rates <- samples.get("c_custom") ) {
for( i <- consumer_rates ) {
consumer_stats.addValue(i._2)
}
}
(producer_stats, consumer_stats, cancels*1.0/samples_count, optimized*1.0/samples_count, unoptimized*1.0/samples_count)
}
def benchmark(name:String, warmup:Int=3, samples_count:Int=15, async_send:Boolean=true)(setup:(ActiveMQScenario)=>Unit) = {
println("Benchmarking: "+name)
var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend="+async_send
val url = broker.getTransportConnectors.get(0).getConnectUri + options
val scenario = new ActiveMQScenario
scenario.url = url
scenario.display_errors = true
scenario.persistent = true
scenario.message_size = 1024 * 3
setup(scenario)
val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = collect_benchmark(scenario, warmup, samples_count)
println("%s: producer avg msg/sec: %,.2f, stddev: %,.2f".format(name, producer_stats.getMean, producer_stats.getStandardDeviation))
println("%s: consumer avg msg/sec: %,.2f, stddev: %,.2f".format(name, consumer_stats.getMean, consumer_stats.getStandardDeviation))
println("%s: canceled enqueues/sec: %,.2f".format(name,cancels))
println("%s: optimized enqueues/sec: %,.2f".format(name,optimized))
println("%s: unoptimized enqueues/sec: %,.2f".format(name,unoptimized))
(producer_stats, consumer_stats, cancels, optimized, unoptimized)
}
def testHighCancelRatio = {
val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = benchmark("both_connected_baseline") { scenario=>
scenario.producers = 1
scenario.consumers = 1
}
val cancel_ratio = cancels / producer_stats.getMean
assertTrue("Expecting more than 80%% of the enqueues get canceled. But only %.2f%% was canceled".format(cancel_ratio*100), cancel_ratio > .80)
}
def testDecoupledProducerRate = {
// Fill up the queue with messages.. for the benefit of the next benchmark..
val from_1_to_0 = benchmark("from_1_to_0", 60) { scenario=>
scenario.producers = 1
scenario.consumers = 0
}
val from_1_to_10 = benchmark("from_1_to_10") { scenario=>
scenario.producers = 1
scenario.consumers = 10
}
val from_1_to_1 = benchmark("from_1_to_1") { scenario=>
scenario.producers = 1
scenario.consumers = 1
}
var percent_diff0 = (1.0 - (from_1_to_0._1.getMean / from_1_to_1._1.getMean)).abs * 100
var percent_diff1 = (1.0 - (from_1_to_1._1.getMean / from_1_to_10._1.getMean)).abs * 100
var msg0 = "The 0 vs 1 consumer scenario producer rate was within %.2f%%".format(percent_diff0)
var msg1 = "The 1 vs 10 consumer scenario producer rate was within %.2f%%".format(percent_diff1)
println(msg0)
println(msg1)
assertTrue(msg0, percent_diff0 <= 60)
assertTrue(msg1, percent_diff1 <= 20)
}
}

View File

@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.hadoop.fs.FileUtil
import java.io.File
import java.util.concurrent.TimeUnit
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class HALevelDBFastEnqueueTest extends LevelDBFastEnqueueTest {
override def setUp: Unit = {
TestingHDFSServer.start
super.setUp
}
override def tearDown: Unit = {
super.tearDown
TestingHDFSServer.stop
}
override protected def createStore: LevelDBStore = {
var store: HALevelDBStore = new HALevelDBStore
store.setDirectory(dataDirectory)
store.setDfsDirectory("target/activemq-data/hdfs-leveldb")
return store
}
private def dataDirectory: File = {
return new File("target/activemq-data/leveldb")
}
/**
* On restart we will also delete the local file system store, so that we test restoring from
* HDFS.
*/
override protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = {
stopBroker
FileUtil.fullyDelete(dataDirectory)
TimeUnit.MILLISECONDS.sleep(restartDelay)
startBroker(false, checkpoint)
}
}

View File

@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.store.PersistenceAdapter
import java.io.File
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class HALevelDBStoreTest extends LevelDBStoreTest {
override protected def setUp: Unit = {
TestingHDFSServer.start
super.setUp
}
override protected def tearDown: Unit = {
super.tearDown
TestingHDFSServer.stop
}
override protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = {
var store: HALevelDBStore = new HALevelDBStore
store.setDirectory(new File("target/activemq-data/haleveldb"))
store.setDfsDirectory("localhost")
if (delete) {
store.deleteAllMessages
}
return store
}
}

View File

@@ -0,0 +1,26 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.console.Main
object IDERunner {
def main(args:Array[String]) ={
Main.main(args)
}
}

View File

@@ -0,0 +1,204 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import java.lang.Thread
import javax.jms._
/**
* <p>
* Simulates load on a JMS server using the JMS messaging API.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
abstract class JMSClientScenario extends Scenario {
def createProducer(i:Int) = {
new ProducerClient(i)
}
def createConsumer(i:Int) = {
new ConsumerClient(i)
}
protected def destination(i:Int):Destination
def indexed_destination_name(i:Int) = destination_type match {
case "queue" => queue_prefix+destination_name+"-"+(i%destination_count)
case "topic" => topic_prefix+destination_name+"-"+(i%destination_count)
case _ => error("Unsuported destination type: "+destination_type)
}
protected def factory:ConnectionFactory
def jms_ack_mode = {
ack_mode match {
case "auto" => Session.AUTO_ACKNOWLEDGE
case "client" => Session.CLIENT_ACKNOWLEDGE
case "dups_ok" => Session.DUPS_OK_ACKNOWLEDGE
case "transacted" => Session.SESSION_TRANSACTED
case _ => throw new Exception("Invalid ack mode: "+ack_mode)
}
}
trait JMSClient extends Client {
@volatile
var connection:Connection = _
var message_counter=0L
var worker = new Thread() {
override def run() {
var reconnect_delay = 0
while( !done.get ) {
try {
if( reconnect_delay!=0 ) {
Thread.sleep(reconnect_delay)
reconnect_delay=0
}
connection = factory.createConnection(user_name, password)
// connection.setClientID(name)
connection.setExceptionListener(new ExceptionListener {
def onException(exception: JMSException) {
}
})
connection.start()
execute
} catch {
case e:Throwable =>
if( !done.get ) {
if( display_errors ) {
e.printStackTrace
}
error_counter.incrementAndGet
reconnect_delay = 1000
}
} finally {
dispose
}
}
}
}
def dispose {
try {
connection.close()
} catch {
case _ =>
}
}
def execute:Unit
def start = {
worker.start
}
def shutdown = {
assert(done.get)
if ( worker!=null ) {
dispose
worker.join(1000)
while(worker.isAlive ) {
println("Worker did not shutdown quickly.. interrupting thread.")
worker.interrupt()
worker.join(1000)
}
worker = null
}
}
def name:String
}
class ConsumerClient(val id: Int) extends JMSClient {
val name: String = "consumer " + id
def execute {
var session = connection.createSession(false, jms_ack_mode)
var consumer:MessageConsumer = if( durable ) {
session.createDurableSubscriber(destination(id).asInstanceOf[Topic], name, selector, no_local)
} else {
session.createConsumer(destination(id), selector, no_local)
}
while( !done.get() ) {
val msg = consumer.receive(500)
if( msg!=null ) {
consumer_counter.incrementAndGet()
if (consumer_sleep != 0) {
Thread.sleep(consumer_sleep)
}
if(session.getAcknowledgeMode == Session.CLIENT_ACKNOWLEDGE) {
msg.acknowledge();
}
}
}
}
}
class ProducerClient(val id: Int) extends JMSClient {
val name: String = "producer " + id
def execute {
val session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
val producer:MessageProducer = session.createProducer(destination(id))
producer.setDeliveryMode(if( persistent ) {
DeliveryMode.PERSISTENT
} else {
DeliveryMode.NON_PERSISTENT
})
val msg = session.createTextMessage(body(name))
headers_for(id).foreach { case (key, value) =>
msg.setStringProperty(key, value)
}
while( !done.get() ) {
producer.send(msg)
producer_counter.incrementAndGet()
if (producer_sleep != 0) {
Thread.sleep(producer_sleep)
}
}
}
}
def body(name:String) = {
val buffer = new StringBuffer(message_size)
buffer.append("Message from " + name+"\n")
for( i <- buffer.length to message_size ) {
buffer.append(('a'+(i%26)).toChar)
}
var rc = buffer.toString
if( rc.length > message_size ) {
rc.substring(0, message_size)
} else {
rc
}
}
}

View File

@@ -0,0 +1,206 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.ActiveMQConnection
import org.apache.activemq.ActiveMQConnectionFactory
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.command.ActiveMQQueue
import org.apache.activemq.command.ConnectionControl
import org.junit.After
import org.junit.Before
import org.junit.Test
import javax.jms._
import java.io.File
import java.util.Vector
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import junit.framework.Assert._
import org.apache.activemq.leveldb.util.Log
import junit.framework.TestCase
object LevelDBFastEnqueueTest extends Log
class LevelDBFastEnqueueTest extends TestCase {
import LevelDBFastEnqueueTest._
@Test def testPublishNoConsumer: Unit = {
startBroker(true, 10)
val sharedCount: AtomicLong = new AtomicLong(toSend)
var start: Long = System.currentTimeMillis
var executorService: ExecutorService = Executors.newCachedThreadPool
var i: Int = 0
while (i < parallelProducer) {
executorService.execute(new Runnable {
def run: Unit = {
try {
publishMessages(sharedCount, 0)
}
catch {
case e: Exception => {
exceptions.add(e)
}
}
}
})
i += 1
}
executorService.shutdown
executorService.awaitTermination(30, TimeUnit.MINUTES)
assertTrue("Producers done in time", executorService.isTerminated)
assertTrue("No exceptions: " + exceptions, exceptions.isEmpty)
var totalSent: Long = toSend * payloadString.length
var duration: Double = System.currentTimeMillis - start
info("Duration: " + duration + "ms")
info("Rate: " + (toSend * 1000 / duration) + "m/s")
info("Total send: " + totalSent)
info("Total journal write: " + store.getLogAppendPosition)
info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%")
stopBroker
restartBroker(0, 1200000)
consumeMessages(toSend)
}
@Test def testPublishNoConsumerNoCheckpoint: Unit = {
toSend = 100
startBroker(true, 0)
val sharedCount: AtomicLong = new AtomicLong(toSend)
var start: Long = System.currentTimeMillis
var executorService: ExecutorService = Executors.newCachedThreadPool
var i: Int = 0
while (i < parallelProducer) {
executorService.execute(new Runnable {
def run: Unit = {
try {
publishMessages(sharedCount, 0)
}
catch {
case e: Exception => {
exceptions.add(e)
}
}
}
})
i += 1;
}
executorService.shutdown
executorService.awaitTermination(30, TimeUnit.MINUTES)
assertTrue("Producers done in time", executorService.isTerminated)
assertTrue("No exceptions: " + exceptions, exceptions.isEmpty)
var totalSent: Long = toSend * payloadString.length
broker.getAdminView.gc
var duration: Double = System.currentTimeMillis - start
info("Duration: " + duration + "ms")
info("Rate: " + (toSend * 1000 / duration) + "m/s")
info("Total send: " + totalSent)
info("Total journal write: " + store.getLogAppendPosition)
info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%")
stopBroker
restartBroker(0, 0)
consumeMessages(toSend)
}
private def consumeMessages(count: Long): Unit = {
var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection]
connection.setWatchTopicAdvisories(false)
connection.start
var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
var consumer: MessageConsumer = session.createConsumer(destination)
var i: Int = 0
while (i < count) {
assertNotNull("got message " + i, consumer.receive(10000))
i += 1;
}
assertNull("none left over", consumer.receive(2000))
}
protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = {
stopBroker
TimeUnit.MILLISECONDS.sleep(restartDelay)
startBroker(false, checkpoint)
}
override def tearDown() = stopBroker
def stopBroker: Unit = {
if (broker != null) {
broker.stop
broker.waitUntilStopped
}
}
private def publishMessages(count: AtomicLong, expiry: Int): Unit = {
var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection]
connection.setWatchTopicAdvisories(false)
connection.start
var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
var producer: MessageProducer = session.createProducer(destination)
var start: Long = System.currentTimeMillis
var i: Long = 0l
var bytes: Array[Byte] = payloadString.getBytes
while ((({
i = count.getAndDecrement; i
})) > 0) {
var message: Message = null
if (useBytesMessage) {
message = session.createBytesMessage
(message.asInstanceOf[BytesMessage]).writeBytes(bytes)
}
else {
message = session.createTextMessage(payloadString)
}
producer.send(message, DeliveryMode.PERSISTENT, 5, expiry)
if (i != toSend && i % sampleRate == 0) {
var now: Long = System.currentTimeMillis
info("Remainder: " + i + ", rate: " + sampleRate * 1000 / (now - start) + "m/s")
start = now
}
}
connection.syncSendPacket(new ConnectionControl)
connection.close
}
def startBroker(deleteAllMessages: Boolean, checkPointPeriod: Int): Unit = {
broker = new BrokerService
broker.setDeleteAllMessagesOnStartup(deleteAllMessages)
store = createStore
broker.setPersistenceAdapter(store)
broker.addConnector("tcp://0.0.0.0:0")
broker.start
var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend=true&jms.alwaysSessionAsync=false&jms.dispatchAsync=false&socketBufferSize=131072&ioBufferSize=16384&wireFormat.tightEncodingEnabled=false&wireFormat.cacheSize=8192"
connectionFactory = new ActiveMQConnectionFactory(broker.getTransportConnectors.get(0).getConnectUri + options)
}
protected def createStore: LevelDBStore = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/leveldb"))
return store
}
private[leveldb] var broker: BrokerService = null
private[leveldb] var connectionFactory: ActiveMQConnectionFactory = null
private[leveldb] var store: LevelDBStore = null
private[leveldb] var destination: Destination = new ActiveMQQueue("Test")
private[leveldb] var payloadString: String = new String(new Array[Byte](6 * 1024))
private[leveldb] var useBytesMessage: Boolean = true
private[leveldb] final val parallelProducer: Int = 20
private[leveldb] var exceptions: Vector[Exception] = new Vector[Exception]
private[leveldb] var toSend: Long = 100000
private[leveldb] final val sampleRate: Double = 100000
}

View File

@@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.broker.BrokerTest
import org.apache.activemq.store.PersistenceAdapter
import java.io.File
import junit.framework.{TestSuite, Test}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object LevelDBStoreBrokerTest {
def suite: Test = {
return new TestSuite(classOf[LevelDBStoreBrokerTest])
}
def main(args: Array[String]): Unit = {
junit.textui.TestRunner.run(suite)
}
}
class LevelDBStoreBrokerTest extends BrokerTest {
protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/leveldb"))
if (delete) {
store.deleteAllMessages
}
return store
}
protected override def createBroker: BrokerService = {
var broker: BrokerService = new BrokerService
broker.setPersistenceAdapter(createPersistenceAdapter(true))
return broker
}
protected def createRestartedBroker: BrokerService = {
var broker: BrokerService = new BrokerService
broker.setPersistenceAdapter(createPersistenceAdapter(false))
return broker
}
}

View File

@@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.activemq.store.PersistenceAdapter
import org.apache.activemq.store.PersistenceAdapterTestSupport
import java.io.File
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class LevelDBStoreTest extends PersistenceAdapterTestSupport {
override def testStoreCanHandleDupMessages: Unit = {
}
protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/haleveldb"))
if (delete) {
store.deleteAllMessages
}
return store
}
}

View File

@@ -0,0 +1,331 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import java.util.concurrent.atomic._
import java.util.concurrent.TimeUnit._
import scala.collection.mutable.ListBuffer
object Scenario {
val MESSAGE_ID:Array[Byte] = "message-id"
val NEWLINE = '\n'.toByte
val NANOS_PER_SECOND = NANOSECONDS.convert(1, SECONDS)
implicit def toBytes(value: String):Array[Byte] = value.getBytes("UTF-8")
def o[T](value:T):Option[T] = value match {
case null => None
case x => Some(x)
}
}
trait Scenario {
import Scenario._
var url:String = "tcp://localhost:61616"
var user_name:String = _
var password:String = _
private var _producer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} }
def producer_sleep = _producer_sleep()
def producer_sleep_= (new_value: Int) = _producer_sleep = new { def apply() = new_value; def init(time: Long) {} }
def producer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _producer_sleep = new_func
private var _consumer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} }
def consumer_sleep = _consumer_sleep()
def consumer_sleep_= (new_value: Int) = _consumer_sleep = new { def apply() = new_value; def init(time: Long) {} }
def consumer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _consumer_sleep = new_func
var producers = 1
var producers_per_sample = 0
var consumers = 1
var consumers_per_sample = 0
var sample_interval = 1000
var message_size = 1024
var persistent = false
var headers = Array[Array[(String,String)]]()
var selector:String = null
var no_local = false
var durable = false
var ack_mode = "auto"
var messages_per_connection = -1L
var display_errors = false
var destination_type = "queue"
private var _destination_name: () => String = () => "load"
def destination_name = _destination_name()
def destination_name_=(new_name: String) = _destination_name = () => new_name
def destination_name_=(new_func: () => String) = _destination_name = new_func
var destination_count = 1
val producer_counter = new AtomicLong()
val consumer_counter = new AtomicLong()
val error_counter = new AtomicLong()
val done = new AtomicBoolean()
var queue_prefix = ""
var topic_prefix = ""
var name = "custom"
var drain_timeout = 2000L
def run() = {
print(toString)
println("--------------------------------------")
println(" Running: Press ENTER to stop")
println("--------------------------------------")
println("")
with_load {
// start a sampling client...
val sample_thread = new Thread() {
override def run() = {
def print_rate(name: String, periodCount:Long, totalCount:Long, nanos: Long) = {
val rate_per_second: java.lang.Float = ((1.0f * periodCount / nanos) * NANOS_PER_SECOND)
println("%s total: %,d, rate: %,.3f per second".format(name, totalCount, rate_per_second))
}
try {
var start = System.nanoTime
var total_producer_count = 0L
var total_consumer_count = 0L
var total_error_count = 0L
collection_start
while( !done.get ) {
Thread.sleep(sample_interval)
val end = System.nanoTime
collection_sample
val samples = collection_end
samples.get("p_custom").foreach { case (_, count)::Nil =>
total_producer_count += count
print_rate("Producer", count, total_producer_count, end - start)
case _ =>
}
samples.get("c_custom").foreach { case (_, count)::Nil =>
total_consumer_count += count
print_rate("Consumer", count, total_consumer_count, end - start)
case _ =>
}
samples.get("e_custom").foreach { case (_, count)::Nil =>
if( count!= 0 ) {
total_error_count += count
print_rate("Error", count, total_error_count, end - start)
}
case _ =>
}
start = end
}
} catch {
case e:InterruptedException =>
}
}
}
sample_thread.start()
System.in.read()
done.set(true)
sample_thread.interrupt
sample_thread.join
}
}
override def toString() = {
"--------------------------------------\n"+
"Scenario Settings\n"+
"--------------------------------------\n"+
" destination_type = "+destination_type+"\n"+
" queue_prefix = "+queue_prefix+"\n"+
" topic_prefix = "+topic_prefix+"\n"+
" destination_count = "+destination_count+"\n" +
" destination_name = "+destination_name+"\n" +
" sample_interval (ms) = "+sample_interval+"\n" +
" \n"+
" --- Producer Properties ---\n"+
" producers = "+producers+"\n"+
" message_size = "+message_size+"\n"+
" persistent = "+persistent+"\n"+
" producer_sleep (ms) = "+producer_sleep+"\n"+
" headers = "+headers.mkString(", ")+"\n"+
" \n"+
" --- Consumer Properties ---\n"+
" consumers = "+consumers+"\n"+
" consumer_sleep (ms) = "+consumer_sleep+"\n"+
" selector = "+selector+"\n"+
" durable = "+durable+"\n"+
""
}
protected def headers_for(i:Int) = {
if ( headers.isEmpty ) {
Array[(String, String)]()
} else {
headers(i%headers.size)
}
}
var producer_samples:Option[ListBuffer[(Long,Long)]] = None
var consumer_samples:Option[ListBuffer[(Long,Long)]] = None
var error_samples = ListBuffer[(Long,Long)]()
def collection_start: Unit = {
producer_counter.set(0)
consumer_counter.set(0)
error_counter.set(0)
producer_samples = if (producers > 0 || producers_per_sample>0 ) {
Some(ListBuffer[(Long,Long)]())
} else {
None
}
consumer_samples = if (consumers > 0 || consumers_per_sample>0 ) {
Some(ListBuffer[(Long,Long)]())
} else {
None
}
}
def collection_end: Map[String, scala.List[(Long,Long)]] = {
var rc = Map[String, List[(Long,Long)]]()
producer_samples.foreach{ samples =>
rc += "p_"+name -> samples.toList
samples.clear
}
consumer_samples.foreach{ samples =>
rc += "c_"+name -> samples.toList
samples.clear
}
rc += "e_"+name -> error_samples.toList
error_samples.clear
rc
}
trait Client {
def start():Unit
def shutdown():Unit
}
var producer_clients = List[Client]()
var consumer_clients = List[Client]()
def with_load[T](func: =>T ):T = {
done.set(false)
_producer_sleep.init(System.currentTimeMillis())
_consumer_sleep.init(System.currentTimeMillis())
for (i <- 0 until producers) {
val client = createProducer(i)
producer_clients ::= client
client.start()
}
for (i <- 0 until consumers) {
val client = createConsumer(i)
consumer_clients ::= client
client.start()
}
try {
func
} finally {
done.set(true)
// wait for the threads to finish..
for( client <- consumer_clients ) {
client.shutdown
}
consumer_clients = List()
for( client <- producer_clients ) {
client.shutdown
}
producer_clients = List()
}
}
def drain = {
done.set(false)
if( destination_type=="queue" || destination_type=="raw_queue" || durable==true ) {
print("draining")
consumer_counter.set(0)
var consumer_clients = List[Client]()
for (i <- 0 until destination_count) {
val client = createConsumer(i)
consumer_clients ::= client
client.start()
}
// Keep sleeping until we stop draining messages.
var drained = 0L
try {
Thread.sleep(drain_timeout);
def done() = {
val c = consumer_counter.getAndSet(0)
drained += c
c == 0
}
while( !done ) {
print(".")
Thread.sleep(drain_timeout);
}
} finally {
done.set(true)
for( client <- consumer_clients ) {
client.shutdown
}
println(". (drained %d)".format(drained))
}
}
}
def collection_sample: Unit = {
val now = System.currentTimeMillis()
producer_samples.foreach(_.append((now, producer_counter.getAndSet(0))))
consumer_samples.foreach(_.append((now, consumer_counter.getAndSet(0))))
error_samples.append((now, error_counter.getAndSet(0)))
// we might need to increase the number of producers..
for (i <- 0 until producers_per_sample) {
val client = createProducer(producer_clients.length)
producer_clients ::= client
client.start()
}
// we might need to increase the number of consumers..
for (i <- 0 until consumers_per_sample) {
val client = createConsumer(consumer_clients.length)
consumer_clients ::= client
client.start()
}
}
def createProducer(i:Int):Client
def createConsumer(i:Int):Client
}
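A minimal wiring sketch, assuming the ActiveMQScenario subclass shown earlier in this change; the consumer sleep policy only needs the structural apply/init members declared above:

val scenario = new ActiveMQScenario
scenario.url = "tcp://localhost:61616"
scenario.producers = 5
scenario.consumers = 2
scenario.producer_sleep = 10                  // fixed 10 ms pause between sends
scenario.consumer_sleep = new {               // or a policy that changes over time
  var start = 0L
  def init(time: Long) { start = time }
  def apply() = if (System.currentTimeMillis() - start < 30000) 0 else 50
}
scenario.run()                                // blocks until ENTER is pressed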

View File

@@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hdfs.MiniDFSCluster
import java.io.IOException
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object TestingHDFSServer {
private[leveldb] def start: Unit = {
var conf: Configuration = new Configuration
cluster = new MiniDFSCluster(conf, 1, true, null)
cluster.waitActive
fs = cluster.getFileSystem
}
private[leveldb] def stop: Unit = {
try {
cluster.shutdown
}
catch {
case e: Throwable => {
e.printStackTrace
}
}
}
private[leveldb] var cluster: MiniDFSCluster = null
private[leveldb] var fs: FileSystem = null
}

View File

@@ -64,11 +64,13 @@
<fusemq-leveldb-version>1.3</fusemq-leveldb-version>
<ftpserver-version>1.0.6</ftpserver-version>
<geronimo-version>1.0</geronimo-version>
<hadoop-version>1.0.0</hadoop-version>
<hawtbuf-version>1.9</hawtbuf-version>
<hawtdispatch-version>1.11</hawtdispatch-version>
<howl-version>0.1.8</howl-version>
<hsqldb-version>1.8.0.10</hsqldb-version>
<httpclient-version>4.2.1</httpclient-version>
<jackson-version>1.9.2</jackson-version>
<jasypt-version>1.9.0</jasypt-version>
<jdom-version>1.0</jdom-version>
<jetty-version>7.6.7.v20120910</jetty-version>
@@ -93,6 +95,9 @@
<rome-version>1.0</rome-version>
<saxon-version>9.4</saxon-version>
<saxon-bundle-version>9.4.0.1_2</saxon-bundle-version>
<scala-plugin-version>2.15.1</scala-plugin-version>
<scala-version>2.9.1</scala-version>
<scalatest-version>1.8</scalatest-version>
<slf4j-version>1.6.6</slf4j-version>
<spring-version>3.0.7.RELEASE</spring-version>
<spring-osgi-version>1.2.1</spring-osgi-version>
@@ -198,6 +203,7 @@
<module>activemq-jaas</module>
<module>activemq-blueprint</module>
<module>activemq-karaf</module>
<module>activemq-leveldb</module>
<module>activemq-openwire-generator</module>
<module>activemq-optional</module>
<module>activemq-pool</module>