diff --git a/activemq-core/pom.xml b/activemq-core/pom.xml index 77f1ecfe37..d87ca8a284 100755 --- a/activemq-core/pom.xml +++ b/activemq-core/pom.xml @@ -1056,6 +1056,8 @@ + + org.codehaus.mojo cobertura-maven-plugin @@ -1222,5 +1228,47 @@ + + + xbean-generate + + + + org.apache.xbean + maven-xbean-plugin + 3.11.2-SNAPSHOT + + + process-classes + + + ${basedir}/../activemq-leveldb/src/main/java + + + ${basedir}/../activemq-leveldb/target/classes + + false + http://activemq.apache.org/schema/core + ${basedir}/src/main/resources/activemq.xsd + ${basedir}/src/main/resources + false + org.apache.activemq.broker.jmx.AnnotatedMBean,org.apache.activemq.broker.jmx.DestinationViewMBean + + + mapping + + + + + + com.thoughtworks.qdox + qdox + 1.12 + + + + + + diff --git a/activemq-core/src/main/resources/META-INF/services/org/apache/xbean/spring/http/activemq.apache.org/schema/core b/activemq-core/src/main/resources/META-INF/services/org/apache/xbean/spring/http/activemq.apache.org/schema/core new file mode 100644 index 0000000000..e2ab90d92d --- /dev/null +++ b/activemq-core/src/main/resources/META-INF/services/org/apache/xbean/spring/http/activemq.apache.org/schema/core @@ -0,0 +1,381 @@ +# NOTE: this file is autogenerated by Apache XBean + +# beans +abortSlowConsumerStrategy = org.apache.activemq.broker.region.policy.AbortSlowConsumerStrategy + +amqPersistenceAdapter = org.apache.activemq.store.amq.AMQPersistenceAdapter +amqPersistenceAdapter.indexPageSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +amqPersistenceAdapter.maxCheckpointMessageAddSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +amqPersistenceAdapter.maxFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +amqPersistenceAdapter.maxReferenceFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor + +amqPersistenceAdapterFactory = org.apache.activemq.store.amq.AMQPersistenceAdapterFactory + +authenticationUser = 
org.apache.activemq.security.AuthenticationUser +org.apache.activemq.security.AuthenticationUser(java.lang.String,java.lang.String,java.lang.String).parameterNames = username password groups + +authorizationEntry = org.apache.activemq.security.AuthorizationEntry + +authorizationMap = org.apache.activemq.security.DefaultAuthorizationMap +org.apache.activemq.security.DefaultAuthorizationMap(java.util.List).parameterNames = authorizationEntries + +authorizationPlugin = org.apache.activemq.security.AuthorizationPlugin +org.apache.activemq.security.AuthorizationPlugin(org.apache.activemq.security.AuthorizationMap).parameterNames = map + +axionJDBCAdapter = org.apache.activemq.store.jdbc.adapter.AxionJDBCAdapter + +blobJDBCAdapter = org.apache.activemq.store.jdbc.adapter.BlobJDBCAdapter + +broker = org.apache.activemq.xbean.XBeanBrokerService +broker.initMethod = afterPropertiesSet +broker.destroyMethod = destroy +broker.advisorySupport.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.deleteAllMessagesOnStartup.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.passiveSlave.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.persistent.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.schedulerSupport.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.shutdownOnSlaveFailure.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.systemExitOnShutdown.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.useJmx.propertyEditor = org.apache.activemq.util.BooleanEditor +broker.waitForSlave.propertyEditor = org.apache.activemq.util.BooleanEditor + +brokerService = org.apache.activemq.broker.BrokerService +brokerService.advisorySupport.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.deleteAllMessagesOnStartup.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.passiveSlave.propertyEditor = org.apache.activemq.util.BooleanEditor 
+brokerService.persistent.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.schedulerSupport.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.shutdownOnSlaveFailure.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.systemExitOnShutdown.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.useJmx.propertyEditor = org.apache.activemq.util.BooleanEditor +brokerService.waitForSlave.propertyEditor = org.apache.activemq.util.BooleanEditor + +bytesJDBCAdapter = org.apache.activemq.store.jdbc.adapter.BytesJDBCAdapter + +cachedLDAPAuthorizationMap = org.apache.activemq.security.CachedLDAPAuthorizationMap + +commandAgent = org.apache.activemq.broker.util.CommandAgent +commandAgent.initMethod = start +commandAgent.destroyMethod = stop + +compositeDemandForwardingBridge = org.apache.activemq.network.CompositeDemandForwardingBridge +org.apache.activemq.network.CompositeDemandForwardingBridge(org.apache.activemq.network.NetworkBridgeConfiguration,org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = configuration localBroker remoteBroker + +compositeQueue = org.apache.activemq.broker.region.virtual.CompositeQueue + +compositeTopic = org.apache.activemq.broker.region.virtual.CompositeTopic + +conditionalNetworkBridgeFilterFactory = org.apache.activemq.network.ConditionalNetworkBridgeFilterFactory + +connectionDotFilePlugin = org.apache.activemq.broker.view.ConnectionDotFilePlugin + +connectionFactory = org.apache.activemq.spring.ActiveMQConnectionFactory +connectionFactory.initMethod = afterPropertiesSet + +constantPendingMessageLimitStrategy = org.apache.activemq.broker.region.policy.ConstantPendingMessageLimitStrategy + +database-locker = org.apache.activemq.store.jdbc.DefaultDatabaseLocker + +db2JDBCAdapter = org.apache.activemq.store.jdbc.adapter.DB2JDBCAdapter + +defaultIOExceptionHandler = org.apache.activemq.util.DefaultIOExceptionHandler + 
+defaultJDBCAdapter = org.apache.activemq.store.jdbc.adapter.DefaultJDBCAdapter + +defaultNetworkBridgeFilterFactory = org.apache.activemq.network.DefaultNetworkBridgeFilterFactory + +defaultUsageCapacity = org.apache.activemq.usage.DefaultUsageCapacity + +demandForwardingBridge = org.apache.activemq.network.DemandForwardingBridge +org.apache.activemq.network.DemandForwardingBridge(org.apache.activemq.network.NetworkBridgeConfiguration,org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = configuration localBroker remoteBroker + +destinationDotFilePlugin = org.apache.activemq.broker.view.DestinationDotFilePlugin + +destinationEntry = org.apache.activemq.filter.DefaultDestinationMapEntry + +destinationPathSeparatorPlugin = org.apache.activemq.broker.util.DestinationPathSeparatorBroker + +discardingDLQBrokerPlugin = org.apache.activemq.plugin.DiscardingDLQBrokerPlugin + +fileCursor = org.apache.activemq.broker.region.policy.FilePendingSubscriberMessageStoragePolicy + +fileDurableSubscriberCursor = org.apache.activemq.broker.region.policy.FilePendingDurableSubscriberMessageStoragePolicy + +fileQueueCursor = org.apache.activemq.broker.region.policy.FilePendingQueueMessageStoragePolicy + +filteredDestination = org.apache.activemq.broker.region.virtual.FilteredDestination + +filteredKahaDB = org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter +org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter(org.apache.activemq.command.ActiveMQDestination,org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter).parameterNames = destination adapter + +fixedCountSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.FixedCountSubscriptionRecoveryPolicy + +fixedSizedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.FixedSizedSubscriptionRecoveryPolicy + +forcePersistencyModeBroker = org.apache.activemq.plugin.ForcePersistencyModeBroker 
+org.apache.activemq.plugin.ForcePersistencyModeBroker(org.apache.activemq.broker.Broker).parameterNames = next + +forcePersistencyModeBrokerPlugin = org.apache.activemq.plugin.ForcePersistencyModeBrokerPlugin + +forwardingBridge = org.apache.activemq.network.ForwardingBridge +org.apache.activemq.network.ForwardingBridge(org.apache.activemq.transport.Transport,org.apache.activemq.transport.Transport).parameterNames = localBroker remoteBroker + +hsqldb-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.HsqldbJDBCAdapter + +imageBasedJDBCAdaptor = org.apache.activemq.store.jdbc.adapter.ImageBasedJDBCAdaptor + +inboundQueueBridge = org.apache.activemq.network.jms.InboundQueueBridge +org.apache.activemq.network.jms.InboundQueueBridge(java.lang.String).parameterNames = inboundQueueName + +inboundTopicBridge = org.apache.activemq.network.jms.InboundTopicBridge +org.apache.activemq.network.jms.InboundTopicBridge(java.lang.String).parameterNames = inboundTopicName + +individualDeadLetterStrategy = org.apache.activemq.broker.region.policy.IndividualDeadLetterStrategy + +informixJDBCAdapter = org.apache.activemq.store.jdbc.adapter.InformixJDBCAdapter + +jDBCIOExceptionHandler = org.apache.activemq.store.jdbc.JDBCIOExceptionHandler + +jaasAuthenticationPlugin = org.apache.activemq.security.JaasAuthenticationPlugin + +jaasCertificateAuthenticationPlugin = org.apache.activemq.security.JaasCertificateAuthenticationPlugin + +jaasDualAuthenticationPlugin = org.apache.activemq.security.JaasDualAuthenticationPlugin + +jdbcPersistenceAdapter = org.apache.activemq.store.jdbc.JDBCPersistenceAdapter +org.apache.activemq.store.jdbc.JDBCPersistenceAdapter(javax.sql.DataSource,org.apache.activemq.wireformat.WireFormat).parameterNames = ds wireFormat + +jmsQueueConnector = org.apache.activemq.network.jms.JmsQueueConnector + +jmsTopicConnector = org.apache.activemq.network.jms.JmsTopicConnector + +journalPersistenceAdapter = org.apache.activemq.store.journal.JournalPersistenceAdapter 
+org.apache.activemq.store.journal.JournalPersistenceAdapter(org.apache.activeio.journal.Journal,org.apache.activemq.store.PersistenceAdapter,org.apache.activemq.thread.TaskRunnerFactory).parameterNames = journal longTermPersistence taskRunnerFactory + +journalPersistenceAdapterFactory = org.apache.activemq.store.journal.JournalPersistenceAdapterFactory +journalPersistenceAdapterFactory.journalLogFileSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor + +journaledJDBC = org.apache.activemq.store.PersistenceAdapterFactoryBean +journaledJDBC.journalLogFileSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor + +kahaDB = org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter +kahaDB.indexCacheSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +kahaDB.indexWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +kahaDB.journalMaxFileLength.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +kahaDB.journalMaxWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor + +kahaPersistenceAdapter = org.apache.activemq.store.kahadaptor.KahaPersistenceAdapter +kahaPersistenceAdapter.maxDataFileLength.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +org.apache.activemq.store.kahadaptor.KahaPersistenceAdapter(java.util.concurrent.atomic.AtomicLong).parameterNames = size + +lDAPAuthorizationMap = org.apache.activemq.security.LDAPAuthorizationMap +org.apache.activemq.security.LDAPAuthorizationMap(java.util.Map).parameterNames = options + +lastImageSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.LastImageSubscriptionRecoveryPolicy + +ldapNetworkConnector = org.apache.activemq.network.LdapNetworkConnector +ldapNetworkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor + +lease-database-locker = org.apache.activemq.store.jdbc.LeaseDatabaseLocker + +levelDB = 
org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter + +loggingBrokerPlugin = org.apache.activemq.broker.util.LoggingBrokerPlugin +loggingBrokerPlugin.initMethod = afterPropertiesSet + +mKahaDB = org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter +mKahaDB.journalMaxFileLength.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +mKahaDB.journalWriteBatchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor + +managementContext = org.apache.activemq.broker.jmx.ManagementContext +managementContext.connectorPort.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +managementContext.createConnector.propertyEditor = org.apache.activemq.util.BooleanEditor +managementContext.rmiServerPort.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +org.apache.activemq.broker.jmx.ManagementContext(javax.management.MBeanServer).parameterNames = server + +masterConnector = org.apache.activemq.broker.ft.MasterConnector +org.apache.activemq.broker.ft.MasterConnector(java.lang.String).parameterNames = remoteUri + +maxdb-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.MaxDBJDBCAdapter + +memoryPersistenceAdapter = org.apache.activemq.store.memory.MemoryPersistenceAdapter + +memoryUsage = org.apache.activemq.usage.MemoryUsage +memoryUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +memoryUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage).parameterNames = parent +org.apache.activemq.usage.MemoryUsage(java.lang.String).parameterNames = name +org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage,java.lang.String).parameterNames = parent name +org.apache.activemq.usage.MemoryUsage(org.apache.activemq.usage.MemoryUsage,java.lang.String,float).parameterNames = parent name portion + +messageGroupHashBucketFactory = 
org.apache.activemq.broker.region.group.MessageGroupHashBucketFactory + +mirroredQueue = org.apache.activemq.broker.region.virtual.MirroredQueue + +multicastNetworkConnector = org.apache.activemq.network.MulticastNetworkConnector +multicastNetworkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +org.apache.activemq.network.MulticastNetworkConnector(java.net.URI).parameterNames = remoteURI + +multicastTraceBrokerPlugin = org.apache.activemq.broker.util.MulticastTraceBrokerPlugin + +mysql-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.MySqlJDBCAdapter + +networkConnector = org.apache.activemq.network.DiscoveryNetworkConnector +networkConnector.prefetchSize.propertyEditor = org.apache.activemq.util.MemoryIntPropertyEditor +org.apache.activemq.network.DiscoveryNetworkConnector(java.net.URI).parameterNames = discoveryURI + +noSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.NoSubscriptionRecoveryPolicy + +oldestMessageEvictionStrategy = org.apache.activemq.broker.region.policy.OldestMessageEvictionStrategy + +oldestMessageWithLowestPriorityEvictionStrategy = org.apache.activemq.broker.region.policy.OldestMessageWithLowestPriorityEvictionStrategy + +oracleBlobJDBCAdapter = org.apache.activemq.store.jdbc.adapter.OracleBlobJDBCAdapter + +oracleJDBCAdapter = org.apache.activemq.store.jdbc.adapter.OracleJDBCAdapter + +outboundQueueBridge = org.apache.activemq.network.jms.OutboundQueueBridge +org.apache.activemq.network.jms.OutboundQueueBridge(java.lang.String).parameterNames = outboundQueueName + +outboundTopicBridge = org.apache.activemq.network.jms.OutboundTopicBridge +org.apache.activemq.network.jms.OutboundTopicBridge(java.lang.String).parameterNames = outboundTopicName + +pListStore = org.apache.activemq.store.kahadb.plist.PListStore + +policyEntry = org.apache.activemq.broker.region.policy.PolicyEntry +policyEntry.memoryLimit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor + +policyMap = 
org.apache.activemq.broker.region.policy.PolicyMap + +postgresql-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.PostgresqlJDBCAdapter + +prefetchPolicy = org.apache.activemq.ActiveMQPrefetchPolicy + +prefetchRatePendingMessageLimitStrategy = org.apache.activemq.broker.region.policy.PrefetchRatePendingMessageLimitStrategy + +priorityNetworkDispatchPolicy = org.apache.activemq.broker.region.policy.PriorityNetworkDispatchPolicy + +proxyConnector = org.apache.activemq.proxy.ProxyConnector + +queryBasedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.QueryBasedSubscriptionRecoveryPolicy + +queue = org.apache.activemq.command.ActiveMQQueue +org.apache.activemq.command.ActiveMQQueue(java.lang.String).parameterNames = name + +queueDispatchSelector = org.apache.activemq.broker.region.QueueDispatchSelector +org.apache.activemq.broker.region.QueueDispatchSelector(org.apache.activemq.command.ActiveMQDestination).parameterNames = destination + +reconnectionPolicy = org.apache.activemq.network.jms.ReconnectionPolicy + +redeliveryPlugin = org.apache.activemq.broker.util.RedeliveryPlugin + +redeliveryPolicy = org.apache.activemq.RedeliveryPolicy + +redeliveryPolicyMap = org.apache.activemq.broker.region.policy.RedeliveryPolicyMap + +roundRobinDispatchPolicy = org.apache.activemq.broker.region.policy.RoundRobinDispatchPolicy + +shared-file-locker = org.apache.activemq.store.SharedFileLocker + +sharedDeadLetterStrategy = org.apache.activemq.broker.region.policy.SharedDeadLetterStrategy + +simpleAuthenticationPlugin = org.apache.activemq.security.SimpleAuthenticationPlugin +org.apache.activemq.security.SimpleAuthenticationPlugin(java.util.List).parameterNames = users + +simpleAuthorizationMap = org.apache.activemq.security.SimpleAuthorizationMap +org.apache.activemq.security.SimpleAuthorizationMap(org.apache.activemq.filter.DestinationMap,org.apache.activemq.filter.DestinationMap,org.apache.activemq.filter.DestinationMap).parameterNames = writeACLs 
readACLs adminACLs + +simpleDispatchPolicy = org.apache.activemq.broker.region.policy.SimpleDispatchPolicy + +simpleDispatchSelector = org.apache.activemq.broker.region.policy.SimpleDispatchSelector +org.apache.activemq.broker.region.policy.SimpleDispatchSelector(org.apache.activemq.command.ActiveMQDestination).parameterNames = destination + +simpleJmsMessageConvertor = org.apache.activemq.network.jms.SimpleJmsMessageConvertor + +simpleMessageGroupMapFactory = org.apache.activemq.broker.region.group.SimpleMessageGroupMapFactory + +sslContext = org.apache.activemq.spring.SpringSslContext +sslContext.initMethod = afterPropertiesSet + +statements = org.apache.activemq.store.jdbc.Statements + +statisticsBrokerPlugin = org.apache.activemq.plugin.StatisticsBrokerPlugin + +storeCursor = org.apache.activemq.broker.region.policy.StorePendingQueueMessageStoragePolicy + +storeDurableSubscriberCursor = org.apache.activemq.broker.region.policy.StorePendingDurableSubscriberMessageStoragePolicy + +storeUsage = org.apache.activemq.usage.StoreUsage +storeUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +storeUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +org.apache.activemq.usage.StoreUsage(java.lang.String,org.apache.activemq.store.PersistenceAdapter).parameterNames = name store +org.apache.activemq.usage.StoreUsage(org.apache.activemq.usage.StoreUsage,java.lang.String).parameterNames = parent name + +streamJDBCAdapter = org.apache.activemq.store.jdbc.adapter.StreamJDBCAdapter + +strictOrderDispatchPolicy = org.apache.activemq.broker.region.policy.StrictOrderDispatchPolicy + +sybase-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.SybaseJDBCAdapter + +systemUsage = org.apache.activemq.usage.SystemUsage +org.apache.activemq.usage.SystemUsage(java.lang.String,org.apache.activemq.store.PersistenceAdapter,org.apache.activemq.store.kahadb.plist.PListStore).parameterNames = name adapter tempStore 
+org.apache.activemq.usage.SystemUsage(org.apache.activemq.usage.SystemUsage,java.lang.String).parameterNames = parent name + +taskRunnerFactory = org.apache.activemq.thread.TaskRunnerFactory +org.apache.activemq.thread.TaskRunnerFactory(java.lang.String).parameterNames = name +org.apache.activemq.thread.TaskRunnerFactory(java.lang.String,int,boolean,int,boolean).parameterNames = name priority daemon maxIterationsPerRun dedicatedTaskRunner +org.apache.activemq.thread.TaskRunnerFactory(java.lang.String,int,boolean,int,boolean,int).parameterNames = name priority daemon maxIterationsPerRun dedicatedTaskRunner maxThreadPoolSize + +tempDestinationAuthorizationEntry = org.apache.activemq.security.TempDestinationAuthorizationEntry + +tempQueue = org.apache.activemq.command.ActiveMQTempQueue +org.apache.activemq.command.ActiveMQTempQueue(java.lang.String).parameterNames = name +org.apache.activemq.command.ActiveMQTempQueue(org.apache.activemq.command.ConnectionId,long).parameterNames = connectionId sequenceId + +tempTopic = org.apache.activemq.command.ActiveMQTempTopic +org.apache.activemq.command.ActiveMQTempTopic(java.lang.String).parameterNames = name +org.apache.activemq.command.ActiveMQTempTopic(org.apache.activemq.command.ConnectionId,long).parameterNames = connectionId sequenceId + +tempUsage = org.apache.activemq.usage.TempUsage +tempUsage.limit.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +tempUsage.percentUsageMinDelta.propertyEditor = org.apache.activemq.util.MemoryPropertyEditor +org.apache.activemq.usage.TempUsage(java.lang.String,org.apache.activemq.store.kahadb.plist.PListStore).parameterNames = name store +org.apache.activemq.usage.TempUsage(org.apache.activemq.usage.TempUsage,java.lang.String).parameterNames = parent name + +timeStampingBrokerPlugin = org.apache.activemq.broker.util.TimeStampingBrokerPlugin + +timedSubscriptionRecoveryPolicy = org.apache.activemq.broker.region.policy.TimedSubscriptionRecoveryPolicy + +topic = 
org.apache.activemq.command.ActiveMQTopic +org.apache.activemq.command.ActiveMQTopic(java.lang.String).parameterNames = name + +traceBrokerPathPlugin = org.apache.activemq.broker.util.TraceBrokerPathPlugin + +transact-database-locker = org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker + +transact-jdbc-adapter = org.apache.activemq.store.jdbc.adapter.TransactJDBCAdapter + +transportConnector = org.apache.activemq.broker.TransportConnector +org.apache.activemq.broker.TransportConnector(org.apache.activemq.transport.TransportServer).parameterNames = server + +udpTraceBrokerPlugin = org.apache.activemq.broker.util.UDPTraceBrokerPlugin + +uniquePropertyMessageEvictionStrategy = org.apache.activemq.broker.region.policy.UniquePropertyMessageEvictionStrategy + +usageCapacity = org.apache.activemq.usage.UsageCapacity + +virtualDestinationInterceptor = org.apache.activemq.broker.region.virtual.VirtualDestinationInterceptor + +virtualSelectorCacheBrokerPlugin = org.apache.activemq.plugin.SubQueueSelectorCacheBrokerPlugin + +virtualTopic = org.apache.activemq.broker.region.virtual.VirtualTopic + +vmCursor = org.apache.activemq.broker.region.policy.VMPendingSubscriberMessageStoragePolicy + +vmDurableCursor = org.apache.activemq.broker.region.policy.VMPendingDurableSubscriberMessageStoragePolicy + +vmQueueCursor = org.apache.activemq.broker.region.policy.VMPendingQueueMessageStoragePolicy + +xaConnectionFactory = org.apache.activemq.spring.ActiveMQXAConnectionFactory +xaConnectionFactory.initMethod = afterPropertiesSet + diff --git a/activemq-core/src/main/resources/META-INF/spring.handlers b/activemq-core/src/main/resources/META-INF/spring.handlers new file mode 100644 index 0000000000..06743926d3 --- /dev/null +++ b/activemq-core/src/main/resources/META-INF/spring.handlers @@ -0,0 +1,3 @@ +#Generated by xbean-spring +#Tue Sep 25 10:20:04 EDT 2012 +http\://activemq.apache.org/schema/core=org.apache.xbean.spring.context.v2.XBeanNamespaceHandler diff --git 
a/activemq-core/src/main/resources/activemq.xsd b/activemq-core/src/main/resources/activemq.xsd new file mode 100644 index 0000000000..cd5cd7ed0e --- /dev/null +++ b/activemq-core/src/main/resources/activemq.xsd @@ -0,0 +1,9296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • + + ]]>
    +
    + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mirrored +Queues should be supported by default if they have not been +explicitly configured. + ]]> + + + + + + + + + + + Virtual +Topics should be supported by default if they have not been +explicitly configured. 
+ ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mirrored +Queues should be supported by default if they have not been +explicitly configured. + ]]> + + + + + + + + + + + Virtual +Topics should be supported by default if they have not been +explicitly configured. 
+ ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DOT file creator plugin which +creates a DOT file showing the current connections + ]]> + + + + + + + + + + + + + + + + + Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX. + ]]> + + + + + + + + + + + + + + + + + + + Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + + + Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + prefetch +policy for consumers created by this connection. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + connection +URL used to connect to the ActiveMQ broker. 
+ ]]> + + + + + + + + + + + + + + + + + Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers. + ]]> + + + + + Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + Message Groups + ]]> + + + + + + + Nested +Structures of Map and List objects + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefetch +policy for consumers created by this connection. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss. + ]]> + + + + + + + + + + + + + + + + + + + + + + start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1). + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + sub-classing is +encouraged to override the default implementation of methods to account for differences in JDBC Driver +implementations.

    The JDBCAdapter inserts and extracts BLOB data using the getBytes()/setBytes() operations.

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    + ]]>
    +
    + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DOT +file creator plugin which creates a DOT file showing the current topic & queue hierarchies. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • Sybase
  • +
  • MS SQL
  • + + ]]>
    +
    + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + container name field and subscription id field must be reduced to 150 characters. +Therefore be sure not to use longer names for container name and subscription id than 150 characters. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath. + ]]> + + + + + + + + + + + + + + + + + + + + + + java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath. + ]]> + + + + + + + + + + + + + + + + + + + + + + java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath. 
+ ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + null +if the broker name was not set. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Master Slave for High +Availability of messages. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message +Groups functionality. + ]]> + + + + + + + + + + + + + + + + + Mirrored +Queue using a prefix and postfix to define the topic name on which to mirror the queue to. 
+ ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + multicast://address:port + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    + ]]>
    +
    + + + + + + + + + + + + + + + + + + + +
    + + + + + + +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Groups +functionality. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Groups +functionality. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    + ]]>
    +
    + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Groups functionality. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • Axion
  • + + ]]>
    +
    + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Virtual Topics. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + +This stops the build-up of unwanted messages, especially when consumers may +disconnect from time to time when using virtual destinations. +

    +This is influenced by code snippets developed by Maciej Rakowicz + ]]> + + + + + + + + + + + + + + + + + Virtual +Topics using a prefix and postfix. The virtual destination creates a +wildcard that is then used to look up all active queue subscriptions which +match. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX. + ]]> + + + + + + + + + + + + + + + + + + + Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + + + Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + prefetch +policy for consumers created by this connection. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + connection +URL used to connect to the ActiveMQ broker. + ]]> + + + + + + + + + + + + + + + + + Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. 
However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers. + ]]> + + + + + Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory) + ]]> + + + + + Message Groups + ]]> + + + + + + + Nested +Structures of Map and List objects + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + prefetch +policy for consumers created by this connection. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss. + ]]> + + + + + + + + + + + + + + + + + + + + + + start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1). + ]]> + + + + + + + + + + diff --git a/activemq-core/src/main/resources/activemq.xsd.html b/activemq-core/src/main/resources/activemq.xsd.html new file mode 100644 index 0000000000..5e3ca8e3ed --- /dev/null +++ b/activemq-core/src/main/resources/activemq.xsd.html @@ -0,0 +1,2843 @@ + + + +Schema for namespace: http://activemq.apache.org/schema/core + + + + + + + +

    Root Element

    + + + +
    ElementDescriptionClass
    brokerAn ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.org.apache.activemq.xbean.XBeanBrokerService
    + +

    Element Summary

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ElementDescriptionClass
    abortSlowConsumerStrategyAbort slow consumers when they reach the configured threshold of slowness, default is slow for 30 secondsorg.apache.activemq.broker.region.policy.AbortSlowConsumerStrategy
    amqPersistenceAdapterAn implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.org.apache.activemq.store.amq.AMQPersistenceAdapter
    amqPersistenceAdapterFactoryAn implementation of {@link PersistenceAdapterFactory}org.apache.activemq.store.amq.AMQPersistenceAdapterFactory
    authenticationUserA helper object used to configure simple authentiaction pluginorg.apache.activemq.security.AuthenticationUser
    authorizationEntryRepresents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a specific +destination or a hierarchical wildcard area of destinations.org.apache.activemq.security.AuthorizationEntry
    authorizationMapRepresents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies. Each entry in the map represents the authorization ACLs +for each operation.org.apache.activemq.security.DefaultAuthorizationMap
    authorizationPluginAn authorization plugin where each operation on a destination is checked +against an authorizationMaporg.apache.activemq.security.AuthorizationPlugin
    axionJDBCAdapterAxion specific Adapter. + +Axion does not seem to support ALTER statements or sub-selects. This means: +- We cannot auto upgrade the schema was we roll out new versions of ActiveMQ +- We cannot delete durable sub messages that have be acknowledged by all consumers.org.apache.activemq.store.jdbc.adapter.AxionJDBCAdapter
    blobJDBCAdapterThis JDBCAdapter inserts and extracts BLOB data using the getBlob()/setBlob() +operations. This is a little more involved since to insert a blob you have +to: + +1: insert empty blob. 2: select the blob 3: finally update the blob with data +value. + +The databases/JDBC drivers that use this adapter are: +
      +
    • +
    org.apache.activemq.store.jdbc.adapter.BlobJDBCAdapter
    brokerAn ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.org.apache.activemq.xbean.XBeanBrokerService
    brokerServiceManages the lifecycle of an ActiveMQ Broker. A BrokerService consists of a +number of transport connectors, network connectors and a bunch of properties +which can be used to configure the broker as its lazily created.org.apache.activemq.broker.BrokerService
    bytesJDBCAdapterThis JDBCAdapter inserts and extracts BLOB data using the +setBytes()/getBytes() operations. The databases/JDBC drivers that use this +adapter are:org.apache.activemq.store.jdbc.adapter.BytesJDBCAdapter
    cachedLDAPAuthorizationMapA {@link DefaultAuthorizationMap} implementation which uses LDAP to initialize and update authorization +policy.org.apache.activemq.security.CachedLDAPAuthorizationMap
    commandAgentAn agent which listens to commands on a JMS destinationorg.apache.activemq.broker.util.CommandAgent
    compositeDemandForwardingBridgeA demand forwarding bridge which works with multicast style transports where +a single Transport could be communicating with multiple remote brokersorg.apache.activemq.network.CompositeDemandForwardingBridge
    compositeQueueRepresents a virtual queue which forwards to a number of other destinations.org.apache.activemq.broker.region.virtual.CompositeQueue
    compositeTopicRepresents a virtual topic which forwards to a number of other destinations.org.apache.activemq.broker.region.virtual.CompositeTopic
    conditionalNetworkBridgeFilterFactoryimplement conditional behaviour for queue consumers, +allows replaying back to origin if no consumers are present on the local broker +after a configurable delay, irrespective of the networkTTL +Also allows rate limiting of messages through the network, useful for static includesorg.apache.activemq.network.ConditionalNetworkBridgeFilterFactory
    connectionDotFilePluginA DOT file creator plugin which +creates a DOT file showing the current connectionsorg.apache.activemq.broker.view.ConnectionDotFilePlugin
    connectionFactoryA Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.org.apache.activemq.spring.ActiveMQConnectionFactory
    constantPendingMessageLimitStrategyThis PendingMessageLimitStrategy is configured to a constant value for all subscriptions.org.apache.activemq.broker.region.policy.ConstantPendingMessageLimitStrategy
    database-lockerRepresents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.org.apache.activemq.store.jdbc.DefaultDatabaseLocker
    db2JDBCAdapterorg.apache.activemq.store.jdbc.adapter.DB2JDBCAdapter
    defaultIOExceptionHandlerorg.apache.activemq.util.DefaultIOExceptionHandler
    defaultJDBCAdapterImplements all the default JDBC operations that are used by the JDBCPersistenceAdapter.

    sub-classing is +encouraged to override the default implementation of methods to account for differences in JDBC Driver +implementations.

    The JDBCAdapter inserts and extracts BLOB data using the getBytes()/setBytes() operations.

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    org.apache.activemq.store.jdbc.adapter.DefaultJDBCAdapter
    defaultNetworkBridgeFilterFactoryimplement default behaviour, filter that will not allow resend to origin +based on brokerPath and which respects networkTTLorg.apache.activemq.network.DefaultNetworkBridgeFilterFactory
    defaultUsageCapacityIdentify if a limit has been reachedorg.apache.activemq.usage.DefaultUsageCapacity
    demandForwardingBridgeForwards messages from the local broker to the remote broker based on demand.org.apache.activemq.network.DemandForwardingBridge
    destinationDotFilePluginA DOT +file creator plugin which creates a DOT file showing the current topic & queue hierarchies.org.apache.activemq.broker.view.DestinationDotFilePlugin
    destinationEntryA default entry in a DestinationMap which holds a single value.org.apache.activemq.filter.DefaultDestinationMapEntry
    destinationPathSeparatorPluginorg.apache.activemq.broker.util.DestinationPathSeparatorBroker
    discardingDLQBrokerPluginorg.apache.activemq.plugin.DiscardingDLQBrokerPlugin
    fileCursorPending messagesorg.apache.activemq.broker.region.policy.FilePendingSubscriberMessageStoragePolicy
    fileDurableSubscriberCursorPending messages for durable subscribersorg.apache.activemq.broker.region.policy.FilePendingDurableSubscriberMessageStoragePolicy
    fileQueueCursorPendingorg.apache.activemq.broker.region.policy.FilePendingQueueMessageStoragePolicy
    filteredDestinationRepresents a destination which is filtered using some predicate such as a selector +so that messages are only dispatched to the destination if they match the filter.org.apache.activemq.broker.region.virtual.FilteredDestination
    filteredKahaDBorg.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter
    fixedCountSubscriptionRecoveryPolicyThis implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +count of last messages.org.apache.activemq.broker.region.policy.FixedCountSubscriptionRecoveryPolicy
    fixedSizedSubscriptionRecoveryPolicyThis implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +amount of memory available in RAM for message history which is evicted in +time order.org.apache.activemq.broker.region.policy.FixedSizedSubscriptionRecoveryPolicy
    forcePersistencyModeBrokerA Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.org.apache.activemq.plugin.ForcePersistencyModeBroker
    forcePersistencyModeBrokerPluginA Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.org.apache.activemq.plugin.ForcePersistencyModeBrokerPlugin
    forwardingBridgeForwards all messages from the local broker to the remote broker.org.apache.activemq.network.ForwardingBridge
    hsqldb-jdbc-adapterorg.apache.activemq.store.jdbc.adapter.HsqldbJDBCAdapter
    imageBasedJDBCAdaptorProvides JDBCAdapter since that uses +IMAGE datatype to hold binary data. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Sybase
    • +
    • MS SQL
    • +
    org.apache.activemq.store.jdbc.adapter.ImageBasedJDBCAdaptor
    inboundQueueBridgeCreate an Inbound Queue Bridge. By default this class uses the sname name for +both the inbound and outbound queue. This behavior can be overridden however +by using the setter methods to configure both the inbound and outboud queue names +separately.org.apache.activemq.network.jms.InboundQueueBridge
    inboundTopicBridgeCreate an Inbound Topic Bridge. By default this class uses the topic name for +both the inbound and outbound topic. This behavior can be overridden however +by using the setter methods to configure both the inbound and outboud topic names +separately.org.apache.activemq.network.jms.InboundTopicBridge
    individualDeadLetterStrategyA {@link DeadLetterStrategy} where each destination has its own individual +DLQ using the subject naming hierarchy.org.apache.activemq.broker.region.policy.IndividualDeadLetterStrategy
    informixJDBCAdapterJDBC Adapter for Informix database. +Because Informix database restricts length of composite primary keys, length of +container name field and subscription id field must be reduced to 150 characters. +Therefore be sure not to use longer names for container name and subscription id than 150 characters.org.apache.activemq.store.jdbc.adapter.InformixJDBCAdapter
    jDBCIOExceptionHandlerorg.apache.activemq.store.jdbc.JDBCIOExceptionHandler
    jaasAuthenticationPluginProvides a JAAS based authentication pluginorg.apache.activemq.security.JaasAuthenticationPlugin
    jaasCertificateAuthenticationPluginProvides a JAAS based SSL certificate authentication pluginorg.apache.activemq.security.JaasCertificateAuthenticationPlugin
    jaasDualAuthenticationPluginProvides a JAAS based authentication pluginorg.apache.activemq.security.JaasDualAuthenticationPlugin
    jdbcPersistenceAdapterA {@link PersistenceAdapter} implementation using JDBC for persistence +storage. + +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.org.apache.activemq.store.jdbc.JDBCPersistenceAdapter
    jmsQueueConnectorA Bridge to other JMS Queue providersorg.apache.activemq.network.jms.JmsQueueConnector
    jmsTopicConnectorA Bridge to other JMS Topic providersorg.apache.activemq.network.jms.JmsTopicConnector
    journalPersistenceAdapterAn implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.org.apache.activemq.store.journal.JournalPersistenceAdapter
    journalPersistenceAdapterFactoryFactory class that can create PersistenceAdapter objects.org.apache.activemq.store.journal.JournalPersistenceAdapterFactory
    journaledJDBCCreates a default persistence model using the Journal and JDBCorg.apache.activemq.store.PersistenceAdapterFactoryBean
    kahaDBAn implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Databaseorg.apache.activemq.store.kahadb.KahaDBPersistenceAdapter
    kahaPersistenceAdapterorg.apache.activemq.store.kahadaptor.KahaPersistenceAdapter
    lDAPAuthorizationMapAn {@link AuthorizationMap} which uses LDAPorg.apache.activemq.security.LDAPAuthorizationMap
    lastImageSubscriptionRecoveryPolicyThis implementation of {@link SubscriptionRecoveryPolicy} will only keep the +last message.org.apache.activemq.broker.region.policy.LastImageSubscriptionRecoveryPolicy
    ldapNetworkConnectorclass to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.org.apache.activemq.network.LdapNetworkConnector
    lease-database-lockerRepresents an exclusive lease on a database to avoid multiple brokers running +against the same logical database.org.apache.activemq.store.jdbc.LeaseDatabaseLocker
    levelDBAn implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with +LevelDB - Embedded Lightweight Non-Relational Databaseorg.apache.activemq.store.leveldb.LevelDBPersistenceAdapter
    loggingBrokerPluginA simple Broker intercepter which allows you to enable/disable logging.org.apache.activemq.broker.util.LoggingBrokerPlugin
    mKahaDBAn implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports +distribution of destinations across multiple kahaDB persistence adaptersorg.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter
    managementContextAn abstraction over JMX mbean registrationorg.apache.activemq.broker.jmx.ManagementContext
    masterConnectorConnects a Slave Broker to a Master when using Master Slave for High +Availability of messages.org.apache.activemq.broker.ft.MasterConnector
    maxdb-jdbc-adapterJDBC Adapter for the MaxDB database.org.apache.activemq.store.jdbc.adapter.MaxDBJDBCAdapter
    memoryPersistenceAdapterorg.apache.activemq.store.memory.MemoryPersistenceAdapter
    memoryUsageUsed to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.org.apache.activemq.usage.MemoryUsage
    messageGroupHashBucketFactoryA factory to create instances of {@link SimpleMessageGroupMap} when +implementing the Message +Groups functionality.org.apache.activemq.broker.region.group.MessageGroupHashBucketFactory
    mirroredQueueCreates Mirrored +Queue using a prefix and postfix to define the topic name on which to mirror the queue to.org.apache.activemq.broker.region.virtual.MirroredQueue
    multicastNetworkConnectorA network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. + +This implementation does not depend on multicast at all; any other group +based transport could be used.org.apache.activemq.network.MulticastNetworkConnector
    multicastTraceBrokerPluginA Broker interceptor which allows you to trace all operations to a Multicast +socket.org.apache.activemq.broker.util.MulticastTraceBrokerPlugin
    mysql-jdbc-adapterorg.apache.activemq.store.jdbc.adapter.MySqlJDBCAdapter
    networkConnectorA network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote brokerorg.apache.activemq.network.DiscoveryNetworkConnector
    noSubscriptionRecoveryPolicyThis SubscriptionRecoveryPolicy disable recovery of messages.org.apache.activemq.broker.region.policy.NoSubscriptionRecoveryPolicy
    oldestMessageEvictionStrategyAn eviction strategy which evicts the oldest message first (which is the +default).org.apache.activemq.broker.region.policy.OldestMessageEvictionStrategy
    oldestMessageWithLowestPriorityEvictionStrategyAn eviction strategy which evicts the oldest message with the lowest priority first.org.apache.activemq.broker.region.policy.OldestMessageWithLowestPriorityEvictionStrategy
    oracleBlobJDBCAdapterImplements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    org.apache.activemq.store.jdbc.adapter.OracleBlobJDBCAdapter
    oracleJDBCAdapterImplements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    org.apache.activemq.store.jdbc.adapter.OracleJDBCAdapter
    outboundQueueBridgeCreate an Outbound Queue Bridge. By default the bridge uses the same +name for both the inbound and outbound queues, however this can be altered +by using the public setter methods to configure both inbound and outbound +queue names.org.apache.activemq.network.jms.OutboundQueueBridge
    outboundTopicBridgeCreate an Outbound Topic Bridge. By default the bridge uses the same +name for both the inbound and outbound topics, however this can be altered +by using the public setter methods to configure both inbound and outbound +topic names.org.apache.activemq.network.jms.OutboundTopicBridge
    pListStoreorg.apache.activemq.store.kahadb.plist.PListStore
    policyEntryRepresents an entry in a {@link PolicyMap} for assigning policies to a +specific destination or a hierarchical wildcard area of destinations.org.apache.activemq.broker.region.policy.PolicyEntry
    policyMapRepresents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.org.apache.activemq.broker.region.policy.PolicyMap
    postgresql-jdbc-adapterImplements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    org.apache.activemq.store.jdbc.adapter.PostgresqlJDBCAdapter
    prefetchPolicyDefines the prefetch message policies for different types of consumersorg.apache.activemq.ActiveMQPrefetchPolicy
    prefetchRatePendingMessageLimitStrategyThis PendingMessageLimitStrategy sets the maximum pending message limit value to be +a multiplier of the prefetch limit of the subscription.org.apache.activemq.broker.region.policy.PrefetchRatePendingMessageLimitStrategy
    priorityNetworkDispatchPolicydispatch policy that ignores lower priority duplicate network consumers, +used in conjunction with network bridge suppresDuplicateTopicSubscriptionsorg.apache.activemq.broker.region.policy.PriorityNetworkDispatchPolicy
    proxyConnectororg.apache.activemq.proxy.ProxyConnector
    queryBasedSubscriptionRecoveryPolicyThis implementation of {@link SubscriptionRecoveryPolicy} will perform a user +specific query mechanism to load any messages they may have missed.org.apache.activemq.broker.region.policy.QueryBasedSubscriptionRecoveryPolicy
    queueAn ActiveMQ Queueorg.apache.activemq.command.ActiveMQQueue
    queueDispatchSelectorQueue dispatch policy that determines if a message can be sent to a subscriptionorg.apache.activemq.broker.region.QueueDispatchSelector
    reconnectionPolicyA policy object that defines how a {@link JmsConnector} deals with +reconnection of the local and foreign connections.org.apache.activemq.network.jms.ReconnectionPolicy
    redeliveryPluginReplace regular DLQ handling with redelivery via a resend to the original destination +after a delay +A destination matching RedeliveryPolicy controls the quantity and delay for re-sends +If there is no matching policy or an existing policy limit is exceeded by default +regular DLQ processing resumes. This is controlled via sendToDlqIfMaxRetriesExceeded +and fallbackToDeadLetterorg.apache.activemq.broker.util.RedeliveryPlugin
    redeliveryPolicyConfiguration options for a messageConsumer used to control how messages are re-delivered when they +are rolled back. +May be used server side on a per destination basis via the Broker RedeliveryPluginorg.apache.activemq.RedeliveryPolicy
    redeliveryPolicyMapRepresents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.org.apache.activemq.broker.region.policy.RedeliveryPolicyMap
    roundRobinDispatchPolicySimple dispatch policy that sends a message to every subscription that +matches the message.org.apache.activemq.broker.region.policy.RoundRobinDispatchPolicy
    shared-file-lockerRepresents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.org.apache.activemq.store.SharedFileLocker
    sharedDeadLetterStrategyA default implementation of {@link DeadLetterStrategy} which uses +a constant destination.org.apache.activemq.broker.region.policy.SharedDeadLetterStrategy
    simpleAuthenticationPluginProvides a simple authentication pluginorg.apache.activemq.security.SimpleAuthenticationPlugin
    simpleAuthorizationMapAn AuthorizationMap which is configured with individual DestinationMaps for +each operation.org.apache.activemq.security.SimpleAuthorizationMap
    simpleDispatchPolicySimple dispatch policy that sends a message to every subscription that +matches the message.org.apache.activemq.broker.region.policy.SimpleDispatchPolicy
    simpleDispatchSelectorSimple dispatch policy that determines if a message can be sent to a subscriptionorg.apache.activemq.broker.region.policy.SimpleDispatchSelector
    simpleJmsMessageConvertorConverts Message from one JMS to anotherorg.apache.activemq.network.jms.SimpleJmsMessageConvertor
    simpleMessageGroupMapFactoryA factory to create instances of {@link SimpleMessageGroupMap} when implementing the +Message Groups functionality.org.apache.activemq.broker.region.group.SimpleMessageGroupMapFactory
    sslContextExtends the SslContext so that it's easier to configure from spring.org.apache.activemq.spring.SpringSslContext
    statementsorg.apache.activemq.store.jdbc.Statements
    statisticsBrokerPluginA StatisticsBrokerPlugin +You can retrieve a Map Message for a Destination - or +Broker containing statistics as key-value pairs The message must contain a +replyTo Destination - else its ignored +To retrieve stats on the broker send a empty message to ActiveMQ.Statistics.Broker (Queue or Topic) +With a replyTo set to the destination you want the stats returned to. +To retrieve stats for a destination - e.g. foo - send an empty message to ActiveMQ.Statistics.Destination.foo +- this works with wildcards to - you get a message for each wildcard match on the replyTo destination. +The stats message is a MapMessage populated with statistics for the targetorg.apache.activemq.plugin.StatisticsBrokerPlugin
    storeCursorPending messagesorg.apache.activemq.broker.region.policy.StorePendingQueueMessageStoragePolicy
    storeDurableSubscriberCursorPending messages for a durableorg.apache.activemq.broker.region.policy.StorePendingDurableSubscriberMessageStoragePolicy
    storeUsageUsed to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.org.apache.activemq.usage.StoreUsage
    streamJDBCAdapterThis JDBCAdapter inserts and extracts BLOB data using the +setBinaryStream()/getBinaryStream() operations. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Axion
    • +
    org.apache.activemq.store.jdbc.adapter.StreamJDBCAdapter
    strictOrderDispatchPolicyDispatch policy that causes every subscription to see messages in the same +order.org.apache.activemq.broker.region.policy.StrictOrderDispatchPolicy
    sybase-jdbc-adapterA JDBC Adapter for Sybase databasesorg.apache.activemq.store.jdbc.adapter.SybaseJDBCAdapter
    systemUsageHolder for Usage instances for memory, store and temp files Main use case is +manage memory usage.org.apache.activemq.usage.SystemUsage
    taskRunnerFactoryManages the thread pool for long running tasks. Long running tasks are not +always active but when they are active, they may need a few iterations of +processing for them to become idle. The manager ensures that each task is +processes but that no one task overtakes the system. This is kinda like +cooperative multitasking.org.apache.activemq.thread.TaskRunnerFactory
    tempDestinationAuthorizationEntryRepresents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destinationorg.apache.activemq.security.TempDestinationAuthorizationEntry
    tempQueueAn ActiveMQ Temporary Queue Destinationorg.apache.activemq.command.ActiveMQTempQueue
    tempTopicAn ActiveMQ Temporary Topic Destinationorg.apache.activemq.command.ActiveMQTempTopic
    tempUsageUsed to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.org.apache.activemq.usage.TempUsage
    timeStampingBrokerPluginA Broker interceptor which updates a JMS Client's timestamp on the message +with a broker timestamp. Useful when the clocks on client machines are known +to not be correct and you can only trust the time set on the broker machines. + +Enabling this plugin will break JMS compliance since the timestamp that the +producer sees on the messages after as send() will be different from the +timestamp the consumer will observe when he receives the message. This plugin +is not enabled in the default ActiveMQ configuration. + +2 new attributes have been added which will allow the administrator some override control +over the expiration time for incoming messages: + +Attribute 'zeroExpirationOverride' can be used to apply an expiration +time to incoming messages with no expiration defined (messages that would never expire) + +Attribute 'ttlCeiling' can be used to apply a limit to the expiration timeorg.apache.activemq.broker.util.TimeStampingBrokerPlugin
    timedSubscriptionRecoveryPolicyThis implementation of {@link SubscriptionRecoveryPolicy} will keep a timed +buffer of messages around in memory and use that to recover new +subscriptions.org.apache.activemq.broker.region.policy.TimedSubscriptionRecoveryPolicy
    topicAn ActiveMQ Topicorg.apache.activemq.command.ActiveMQTopic
    traceBrokerPathPluginThe TraceBrokerPathPlugin can be used in a network of Brokers. Each Broker +that has the plugin configured, will add it's brokerName to the content +of a JMS Property. If all Brokers have this property enabled, the path the +message actually took through the network can be seen in the defined property.org.apache.activemq.broker.util.TraceBrokerPathPlugin
    transact-database-lockerRepresents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker
    transact-jdbc-adapterA JDBC Adapter for Transact-SQL based databases such as SQL Server or Sybaseorg.apache.activemq.store.jdbc.adapter.TransactJDBCAdapter
    transportConnectororg.apache.activemq.broker.TransportConnector
    udpTraceBrokerPluginA Broker interceptor which allows you to trace all operations to a UDP +socket.org.apache.activemq.broker.util.UDPTraceBrokerPlugin
    uniquePropertyMessageEvictionStrategyAn eviction strategy which evicts the oldest message within messages with the same property valueorg.apache.activemq.broker.region.policy.UniquePropertyMessageEvictionStrategy
    usageCapacityIdentify if a limit has been reachedorg.apache.activemq.usage.UsageCapacity
    virtualDestinationInterceptorImplements Virtual Topics.org.apache.activemq.broker.region.virtual.VirtualDestinationInterceptor
    virtualSelectorCacheBrokerPluginA plugin which allows the caching of the selector from a subscription queue. +

    +This stops the build-up of unwanted messages, especially when consumers may +disconnect from time to time when using virtual destinations. +

    +This is influenced by code snippets developed by Maciej Rakowicz

    org.apache.activemq.plugin.SubQueueSelectorCacheBrokerPlugin
    virtualTopicCreates Virtual +Topics using a prefix and postfix. The virtual destination creates a +wildcard that is then used to look up all active queue subscriptions which +match.org.apache.activemq.broker.region.virtual.VirtualTopic
    vmCursorPending messages heldorg.apache.activemq.broker.region.policy.VMPendingSubscriberMessageStoragePolicy
    vmDurableCursorPendingorg.apache.activemq.broker.region.policy.VMPendingDurableSubscriberMessageStoragePolicy
    vmQueueCursorPending messagesorg.apache.activemq.broker.region.policy.VMPendingQueueMessageStoragePolicy
    xaConnectionFactoryA Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.org.apache.activemq.spring.ActiveMQXAConnectionFactory
    + + +

    Element Detail

    +

    Element: abortSlowConsumerStrategy

    + + + + + + + +
    AttributeTypeDescription
    abortConnectionxs:booleanabort the consumers connection rather than sending a stop command to the remote consumer
    checkPeriodxs:longtime in milliseconds between checks for slow subscriptions
    maxSlowCountxs:longnumber of times a subscription can be deemed slow before triggering abort +effect depends on dispatch rate as slow determination is done on dispatch
    maxSlowDurationxs:longtime in milliseconds that a sub can remain slow before triggering +an abort.
    namexs:string
    + + + +
    ElementTypeDescription
    brokerServicedestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: amqPersistenceAdapter

    + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    archiveDataLogsxs:boolean
    brokerNamexs:string
    checkpointIntervalxs:long
    cleanupIntervalxs:long
    directoryxs:string
    directoryArchivexs:string
    disableLockingxs:boolean
    forceRecoverReferenceStorexs:boolean
    indexBinSizexs:integer
    indexKeySizexs:integer
    indexLoadFactorxs:integer
    indexMaxBinSizexs:integer
    indexPageSizexs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    journalThreadPriorityxs:integer
    maxCheckpointMessageAddSizexs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    maxFileLengthxs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    maxReferenceFileLengthxs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    persistentIndexxs:boolean
    recoverReferenceStorexs:boolean
    syncOnWritexs:boolean
    useDedicatedTaskRunnerxs:boolean
    useNioxs:boolean
    + + + + + + + + +
    ElementTypeDescription
    asyncDataManager<spring:bean/>
    brokerServicebroker | brokerService
    referenceStoreAdapter<spring:bean/>
    taskRunnerFactorytaskRunnerFactory
    usageManagersystemUsage
    wireFormat<spring:bean/>
    +

    Element: amqPersistenceAdapterFactory

    + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    brokerNamexs:string
    checkpointIntervalxs:long
    cleanupIntervalxs:long
    dataDirectoryxs:string
    forceRecoverReferenceStorexs:boolean
    indexBinSizexs:integer
    indexKeySizexs:integer
    indexLoadFactorxs:integer
    indexMaxBinSizexs:integer
    indexPageSizexs:integer
    journalThreadPriorityxs:integer
    maxFileLengthxs:integer
    maxReferenceFileLengthxs:integer
    persistentIndexxs:boolean
    recoverReferenceStorexs:boolean
    syncOnTransactionxs:boolean
    syncOnWritexs:boolean
    useDedicatedTaskRunnerxs:boolean
    useNioxs:boolean
    + + + + +
    ElementTypeDescription
    referenceStoreAdapter<spring:bean/>
    taskRunnerFactorytaskRunnerFactory
    +

    Element: authenticationUser

    + + + + + +
    AttributeTypeDescription
    groupsxs:string
    passwordxs:string
    usernamexs:string
    +

    Element: authorizationEntry

    + + + + + + + + + + +
    AttributeTypeDescription
    adminxs:string
    groupClassxs:string
    queuexs:stringA helper method to set the destination from a configuration file
    readxs:string
    tempQueuexs:boolean
    tempTopicxs:boolean
    topicxs:stringA helper method to set the destination from a configuration file
    writexs:string
    + + + + + + +
    ElementTypeDescription
    adminACLs(<spring:bean/>)*
    destinationqueue | tempQueue | tempTopic | topic
    readACLs(<spring:bean/>)*
    writeACLs(<spring:bean/>)*
    +

    Element: authorizationMap

    + + + + + + +
    ElementTypeDescription
    authorizationEntries(<spring:bean/>)*Sets the individual entries on the authorization map
    defaultEntryauthorizationEntry | tempDestinationAuthorizationEntry
    entries(<spring:bean/>)*A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring
    tempDestinationAuthorizationEntrytempDestinationAuthorizationEntry
    +

    Element: authorizationPlugin

    + + + +
    ElementTypeDescription
    mapauthorizationMap | cachedLDAPAuthorizationMap | lDAPAuthorizationMap | simpleAuthorizationMap
    +

    Element: axionJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: blobJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: broker

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    advisorySupportxs:stringAllows the support of advisory messages to be disabled for performance +reasons.
    allowTempAutoCreationOnSendxs:booleanenable if temp destinations need to be propagated through a network when +advisorySupport==false. This is used in conjunction with the policy +gcInactiveDestinations for matching temps so they can get removed +when inactive
    brokerIdxs:string
    brokerNamexs:stringSets the name of this broker; which must be unique in the network
    brokerObjectNamexs:stringSets the JMX ObjectName for this broker
    cacheTempDestinationsxs:boolean
    consumerSystemUsagePortionxs:integer
    dataDirectoryxs:stringSets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.
    dataDirectoryFilexs:stringSets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.
    dedicatedTaskRunnerxs:boolean
    deleteAllMessagesOnStartupxs:stringSets whether or not all messages are deleted on startup - mostly only +useful for testing.
    enableStatisticsxs:booleanSets whether or not the Broker's services enable statistics or not.
    keepDurableSubsActivexs:boolean
    masterConnectorURIxs:string
    maxPurgedDestinationsPerSweepxs:integer
    mbeanInvocationTimeoutxs:longGets the time in Milliseconds that an invocation of an MBean method will wait before +failing. The default value is to wait forever (zero).
    monitorConnectionSplitsxs:boolean
    networkConnectorStartAsyncxs:boolean
    offlineDurableSubscriberTaskSchedulexs:integer
    offlineDurableSubscriberTimeoutxs:integer
    passiveSlavexs:stringGet the passiveSlave
    persistenceThreadPriorityxs:integer
    persistentxs:stringSets whether or not persistence is enabled or disabled.
    populateJMSXUserIDxs:booleanSets whether or not the broker should populate the JMSXUserID header.
    populateUserNameInMBeansxs:booleanShould MBeans that support showing the Authenticated User Name information have this +value filled in or not.
    producerSystemUsagePortionxs:integer
    schedulePeriodForDestinationPurgexs:integer
    schedulerDirectoryxs:string
    schedulerDirectoryFilexs:string
    schedulerSupportxs:string
    shutdownOnMasterFailurexs:boolean
    shutdownOnSlaveFailurexs:string
    splitSystemUsageForProducersConsumersxs:boolean
    startxs:booleanSets whether or not the broker is started along with the ApplicationContext it is defined within. +Normally you would want the broker to start up along with the ApplicationContext but sometimes when working +with JUnit tests you may wish to start and stop the broker explicitly yourself.
    startAsyncxs:boolean
    supportFailOverxs:boolean
    systemExitOnShutdownxs:string
    systemExitOnShutdownExitCodexs:integer
    taskRunnerPriorityxs:integer
    timeBeforePurgeTempDestinationsxs:integer
    tmpDataDirectoryxs:string
    useAuthenticatedPrincipalForJMSXUserIDxs:boolean
    useJmxxs:stringSets whether or not the Broker's services should be exposed into JMX or +not.
    useLocalHostBrokerNamexs:boolean
    useLoggingForShutdownErrorsxs:booleanSets whether or not we should use commons-logging when reporting errors +when shutting down the broker
    useMirroredQueuesxs:booleanSets whether or not Mirrored +Queues should be supported by default if they have not been +explicitly configured.
    useShutdownHookxs:booleanSets whether or not we should use a shutdown handler to close down the +broker cleanly if the JVM is terminated. It is recommended you leave this +enabled.
    useTempMirroredQueuesxs:boolean
    useVirtualTopicsxs:booleanSets whether or not Virtual +Topics should be supported by default if they have not been +explicitly configured.
    vmConnectorURIxs:string
    waitForSlavexs:string
    waitForSlaveTimeoutxs:long
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ElementTypeDescription
    adminView<spring:bean/>Returns the administration view of the broker; used to create and destroy +resources such as queues and topics. Note this method returns null if JMX +is disabled.
    brokerContext<spring:bean/>
    consumerSystemUsagesystemUsage
    destinationFactory<spring:bean/>
    destinationInterceptors(mirroredQueue | virtualDestinationInterceptor)*Sets the destination interceptors to use
    destinationPolicypolicyMapSets the destination specific policies available either for exact +destinations or for wildcard areas of destinations.
    destinations(queue | tempQueue | tempTopic | topic)*Sets the destinations which should be loaded/created on startup
    ioExceptionHandlerdefaultIOExceptionHandler | jDBCIOExceptionHandleroverride the Default IOException handler, called when persistence adapter +has experiences File or JDBC I/O Exceptions
    jmsBridgeConnectors(jmsQueueConnector | jmsTopicConnector)*
    managementContextmanagementContext
    messageAuthorizationPolicy<spring:bean/>Sets the policy used to decide if the current connection is authorized to +consume a given message
    networkConnectorURIs(<spring:bean/>)*
    networkConnectors(ldapNetworkConnector | multicastNetworkConnector | networkConnector)*Sets the network connectors which this broker will use to connect to +other brokers in a federated network
    persistenceAdapteramqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapterSets the persistence adaptor implementation to use for this broker
    persistenceFactoryamqPersistenceAdapterFactory | journalPersistenceAdapterFactory | journaledJDBC
    persistenceTaskRunnerFactorytaskRunnerFactory
    plugins(authorizationPlugin | connectionDotFilePlugin | destinationDotFilePlugin | destinationPathSeparatorPlugin | discardingDLQBrokerPlugin | forcePersistencyModeBrokerPlugin | jaasAuthenticationPlugin | jaasCertificateAuthenticationPlugin | jaasDualAuthenticationPlugin | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | simpleAuthenticationPlugin | statisticsBrokerPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin | virtualSelectorCacheBrokerPlugin)*Sets a number of broker plugins to install such as for security +authentication or authorization
    producerSystemUsagesystemUsage
    proxyConnectors(<spring:bean/>)*Sets the network connectors which this broker will use to connect to +other brokers in a federated network
    regionBrokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    services(broker | brokerService | commandAgent | database-locker | forwardingBridge | inboundQueueBridge | inboundTopicBridge | jdbcPersistenceAdapter | jmsQueueConnector | jmsTopicConnector | journalPersistenceAdapterFactory | journaledJDBC | kahaDB | ldapNetworkConnector | lease-database-locker | levelDB | managementContext | masterConnector | memoryUsage | multicastNetworkConnector | networkConnector | outboundQueueBridge | outboundTopicBridge | pListStore | proxyConnector | shared-file-locker | storeUsage | systemUsage | tempUsage | transact-database-locker)*Sets the services associated with this broker such as a +{@link MasterConnector}
    shutdownHooks(<spring:bean/>)*Sets hooks to be executed when broker shut down
    sslContextsslContext
    systemUsagesystemUsage
    taskRunnerFactorytaskRunnerFactory
    tempDataStorepListStore
    transportConnectorURIs(<spring:bean/>)*
    transportConnectors(transportConnector)*Sets the transport connectors which this broker will listen on for new +clients
    +

    Element: brokerService

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    advisorySupportxs:stringAllows the support of advisory messages to be disabled for performance +reasons.
    allowTempAutoCreationOnSendxs:booleanenable if temp destinations need to be propagated through a network when +advisorySupport==false. This is used in conjunction with the policy +gcInactiveDestinations for matching temps so they can get removed +when inactive
    brokerIdxs:string
    brokerNamexs:stringSets the name of this broker; which must be unique in the network
    brokerObjectNamexs:stringSets the JMX ObjectName for this broker
    cacheTempDestinationsxs:boolean
    consumerSystemUsagePortionxs:integer
    dataDirectoryxs:stringSets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.
    dataDirectoryFilexs:stringSets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.
    dedicatedTaskRunnerxs:boolean
    deleteAllMessagesOnStartupxs:stringSets whether or not all messages are deleted on startup - mostly only +useful for testing.
    enableStatisticsxs:booleanSets whether or not the Broker's services enable statistics or not.
    keepDurableSubsActivexs:boolean
    masterConnectorURIxs:string
    maxPurgedDestinationsPerSweepxs:integer
    mbeanInvocationTimeoutxs:longGets the time in Milliseconds that an invocation of an MBean method will wait before +failing. The default value is to wait forever (zero).
    monitorConnectionSplitsxs:boolean
    networkConnectorStartAsyncxs:boolean
    offlineDurableSubscriberTaskSchedulexs:integer
    offlineDurableSubscriberTimeoutxs:integer
    passiveSlavexs:stringGet the passiveSlave
    persistenceThreadPriorityxs:integer
    persistentxs:stringSets whether or not persistence is enabled or disabled.
    populateJMSXUserIDxs:booleanSets whether or not the broker should populate the JMSXUserID header.
    populateUserNameInMBeansxs:booleanShould MBeans that support showing the Authenticated User Name information have this +value filled in or not.
    producerSystemUsagePortionxs:integer
    schedulePeriodForDestinationPurgexs:integer
    schedulerDirectoryxs:string
    schedulerDirectoryFilexs:string
    schedulerSupportxs:string
    shutdownOnMasterFailurexs:boolean
    shutdownOnSlaveFailurexs:string
    splitSystemUsageForProducersConsumersxs:boolean
    startAsyncxs:boolean
    supportFailOverxs:boolean
    systemExitOnShutdownxs:string
    systemExitOnShutdownExitCodexs:integer
    taskRunnerPriorityxs:integer
    timeBeforePurgeTempDestinationsxs:integer
    tmpDataDirectoryxs:string
    useAuthenticatedPrincipalForJMSXUserIDxs:boolean
    useJmxxs:stringSets whether or not the Broker's services should be exposed into JMX or +not.
    useLocalHostBrokerNamexs:boolean
    useLoggingForShutdownErrorsxs:booleanSets whether or not we should use commons-logging when reporting errors +when shutting down the broker
    useMirroredQueuesxs:booleanSets whether or not Mirrored +Queues should be supported by default if they have not been +explicitly configured.
    useShutdownHookxs:booleanSets whether or not we should use a shutdown handler to close down the +broker cleanly if the JVM is terminated. It is recommended you leave this +enabled.
    useTempMirroredQueuesxs:boolean
    useVirtualTopicsxs:booleanSets whether or not Virtual +Topics should be supported by default if they have not been +explicitly configured.
    vmConnectorURIxs:string
    waitForSlavexs:string
    waitForSlaveTimeoutxs:long
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ElementTypeDescription
    adminView<spring:bean/>Returns the administration view of the broker; used to create and destroy +resources such as queues and topics. Note this method returns null if JMX +is disabled.
    brokerContext<spring:bean/>
    consumerSystemUsagesystemUsage
    destinationFactory<spring:bean/>
    destinationInterceptors(mirroredQueue | virtualDestinationInterceptor)*Sets the destination interceptors to use
    destinationPolicypolicyMapSets the destination specific policies available either for exact +destinations or for wildcard areas of destinations.
    destinations(queue | tempQueue | tempTopic | topic)*Sets the destinations which should be loaded/created on startup
    ioExceptionHandlerdefaultIOExceptionHandler | jDBCIOExceptionHandleroverride the Default IOException handler, called when persistence adapter +has experiences File or JDBC I/O Exceptions
    jmsBridgeConnectors(jmsQueueConnector | jmsTopicConnector)*
    managementContextmanagementContext
    messageAuthorizationPolicy<spring:bean/>Sets the policy used to decide if the current connection is authorized to +consume a given message
    networkConnectorURIs(<spring:bean/>)*
    networkConnectors(ldapNetworkConnector | multicastNetworkConnector | networkConnector)*Sets the network connectors which this broker will use to connect to +other brokers in a federated network
    persistenceAdapteramqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapterSets the persistence adaptor implementation to use for this broker
    persistenceFactoryamqPersistenceAdapterFactory | journalPersistenceAdapterFactory | journaledJDBC
    persistenceTaskRunnerFactorytaskRunnerFactory
    plugins(authorizationPlugin | connectionDotFilePlugin | destinationDotFilePlugin | destinationPathSeparatorPlugin | discardingDLQBrokerPlugin | forcePersistencyModeBrokerPlugin | jaasAuthenticationPlugin | jaasCertificateAuthenticationPlugin | jaasDualAuthenticationPlugin | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | simpleAuthenticationPlugin | statisticsBrokerPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin | virtualSelectorCacheBrokerPlugin)*Sets a number of broker plugins to install such as for security +authentication or authorization
    producerSystemUsagesystemUsage
    proxyConnectors(<spring:bean/>)*Sets the network connectors which this broker will use to connect to +other brokers in a federated network
    regionBrokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    services(broker | brokerService | commandAgent | database-locker | forwardingBridge | inboundQueueBridge | inboundTopicBridge | jdbcPersistenceAdapter | jmsQueueConnector | jmsTopicConnector | journalPersistenceAdapterFactory | journaledJDBC | kahaDB | ldapNetworkConnector | lease-database-locker | levelDB | managementContext | masterConnector | memoryUsage | multicastNetworkConnector | networkConnector | outboundQueueBridge | outboundTopicBridge | pListStore | proxyConnector | shared-file-locker | storeUsage | systemUsage | tempUsage | transact-database-locker)*Sets the services associated with this broker such as a +{@link MasterConnector}
    shutdownHooks(<spring:bean/>)*Sets hooks to be executed when broker shut down
    sslContextsslContext
    systemUsagesystemUsage
    taskRunnerFactorytaskRunnerFactory
    tempDataStorepListStore
    transportConnectorURIs(<spring:bean/>)*
    transportConnectors(transportConnector)*Sets the transport connectors which this broker will listen on for new +clients
    +

    Element: bytesJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: cachedLDAPAuthorizationMap

    + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    adminPermissionGroupSearchFilterxs:string
    authenticationxs:string
    connectionPasswordxs:string
    connectionProtocolxs:string
    connectionURLxs:string
    connectionUsernamexs:string
    groupNameAttributexs:string
    groupObjectClassxs:string
    legacyGroupMappingxs:boolean
    permissionGroupMemberAttributexs:string
    queueSearchBasexs:string
    readPermissionGroupSearchFilterxs:string
    refreshDisabledxs:boolean
    refreshIntervalxs:integer
    tempSearchBasexs:string
    topicSearchBasexs:string
    userNameAttributexs:string
    userObjectClassxs:string
    writePermissionGroupSearchFilterxs:string
    + + + + + + +
    ElementTypeDescription
    authorizationEntries(<spring:bean/>)*Sets the individual entries on the authorization map
    defaultEntryauthorizationEntry | tempDestinationAuthorizationEntry
    entries(<spring:bean/>)*A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring
    tempDestinationAuthorizationEntrytempDestinationAuthorizationEntry
    +

    Element: commandAgent

    + + + + + +
    AttributeTypeDescription
    brokerUrlxs:string
    passwordxs:string
    usernamexs:string
    + + + + + +
    ElementTypeDescription
    commandDestinationqueue | tempQueue | tempTopic | topic
    connection<spring:bean/>
    connectionFactoryconnectionFactory | xaConnectionFactory
    +

    Element: compositeDemandForwardingBridge

    + + + + +
    AttributeTypeDescription
    createdByDuplexxs:boolean
    mbeanObjectNamexs:string
    + + + + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    configurationldapNetworkConnector | multicastNetworkConnector | networkConnector
    durableDestinations(queue | tempQueue | tempTopic | topic)*
    dynamicallyIncludedDestinations(queue | tempQueue | tempTopic | topic)*
    excludedDestinations(queue | tempQueue | tempTopic | topic)*
    localBroker<spring:bean/>
    networkBridgeListener<spring:bean/>
    remoteBroker<spring:bean/>
    staticallyIncludedDestinations(queue | tempQueue | tempTopic | topic)*
    +

    Element: compositeQueue

    + + + + + +
    AttributeTypeDescription
    copyMessagexs:booleanSets whether a copy of the message will be sent to each destination. +Defaults to true so that the forward destination is set as the +destination of the message
    forwardOnlyxs:booleanSets if the virtual destination is forward only (and so there is no +physical queue to match the virtual queue) or if there is also a physical +queue with the same name).
    namexs:stringSets the name of this composite destination
    + + + +
    ElementTypeDescription
    forwardTo(<spring:bean/>)*Sets the list of destinations to forward to
    +

    Element: compositeTopic

    + + + + + +
    AttributeTypeDescription
    copyMessagexs:booleanSets whether a copy of the message will be sent to each destination. +Defaults to true so that the forward destination is set as the +destination of the message
    forwardOnlyxs:booleanSets if the virtual destination is forward only (and so there is no +physical queue to match the virtual queue) or if there is also a physical +queue with the same name).
    namexs:stringSets the name of this composite destination
    + + + +
    ElementTypeDescription
    forwardTo(<spring:bean/>)*Sets the list of destinations to forward to
    +

    Element: conditionalNetworkBridgeFilterFactory

    + + + + + + +
    AttributeTypeDescription
    rateDurationxs:integer
    rateLimitxs:integer
    replayDelayxs:integer
    replayWhenNoConsumersxs:boolean
    +

    Element: connectionDotFilePlugin

    + + + +
    AttributeTypeDescription
    filexs:stringSets the destination file name to create the destination diagram
    +

    Element: connectionFactory

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    alwaysSessionAsyncxs:booleanIf this flag is not set then a separate thread is not used for dispatching messages for each Session in +the Connection. However, a separate thread is always used if there is more than one session, or the session +isn't in auto acknowledge or duplicates ok mode. By default this value is set to true and session dispatch +happens asynchronously.
    alwaysSyncSendxs:booleanSet true if always require messages to be sync sent
    auditDepthxs:integer
    auditMaximumProducerNumberxs:integer
    beanNamexs:string
    brokerURLxs:stringSets the connection +URL used to connect to the ActiveMQ broker.
    checkForDuplicatesxs:boolean
    clientIDxs:stringSets the JMS clientID to use for the created connection. Note that this +can only be used by one connection at once so generally its a better idea +to set the clientID on a Connection
    clientIDPrefixxs:stringSets the prefix used by autogenerated JMS Client ID values which are used +if the JMS client does not explicitly specify on.
    closeTimeoutxs:integerSets the timeout before a close is considered complete. Normally a +close() on a connection waits for confirmation from the broker; this +allows that operation to timeout to save the client hanging if there is +no broker
    connectionIDPrefixxs:stringSets the prefix used by connection id generator
    consumerFailoverRedeliveryWaitPeriodxs:long
    copyMessageOnSendxs:booleanShould a JMS message be copied to a new JMS Message object as part of the +send() method in JMS. This is enabled by default to be compliant with the +JMS specification. You can disable it if you do not mutate JMS messages +after they are sent for a performance boost
    disableTimeStampsByDefaultxs:booleanSets whether or not timestamps on messages should be disabled or not. If +you disable them it adds a small performance boost.
    dispatchAsyncxs:booleanEnables or disables the default setting of whether or not consumers have +their messages dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers.
    exclusiveConsumerxs:booleanEnables or disables whether or not queue consumers should be exclusive or +not for example to preserve ordering when not using Message Groups
    maxThreadPoolSizexs:integer
    messagePrioritySupportedxs:boolean
    nestedMapAndListEnabledxs:booleanEnables/disables whether or not Message properties and MapMessage entries +support Nested +Structures of Map and List objects
    nonBlockingRedeliveryxs:booleanWhen true a MessageConsumer will not stop Message delivery before re-delivering Messages +from a rolled back transaction. This implies that message order will not be preserved and +also will result in the TransactedIndividualAck option to be enabled.
    objectMessageSerializationDeferedxs:booleanWhen an object is set on an ObjectMessage, the JMS spec requires the +object to be serialized by that set method. Enabling this flag causes the +object to not get serialized. The object may subsequently get serialized +if the message needs to be sent over a socket or stored to disk.
    optimizeAcknowledgexs:boolean
    optimizeAcknowledgeTimeOutxs:longThe max time in milliseconds between optimized ack batches
    optimizedAckScheduledAckIntervalxs:longGets the configured time interval that is used to force all MessageConsumers that have optimizedAcknowledge enabled +to send an ack for any outstanding Message Acks. By default this value is set to zero meaning that the consumers +will not do any background Message acknowledgment.
    optimizedMessageDispatchxs:booleanIf this flag is set then an larger prefetch limit is used - only +applicable for durable topic subscribers.
    passwordxs:stringSets the JMS password used for connections created from this factory
    producerWindowSizexs:integer
    sendAcksAsyncxs:boolean
    sendTimeoutxs:integer
    statsEnabledxs:boolean
    transactedIndividualAckxs:booleanwhen true, submit individual transacted acks immediately rather than with transaction completion. +This allows the acks to represent delivery status which can be persisted on rollback +Used in conjunction with org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter#setRewriteOnRedelivery(boolean) true
    useAsyncSendxs:booleanForces the use of Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss.
    useBeanNameAsClientIdPrefixxs:boolean
    useCompressionxs:booleanEnables the use of compression of the message bodies
    useDedicatedTaskRunnerxs:boolean
    useRetroactiveConsumerxs:booleanSets whether or not retroactive consumers are enabled. Retroactive +consumers allow non-durable topic subscribers to receive old messages +that were published before the non-durable subscriber started.
    userNamexs:stringSets the JMS userName used by connections created by this factory
    warnAboutUnstartedConnectionTimeoutxs:longEnables the timeout from a connection creation to when a warning is +generated if the connection is not properly started via +{@link Connection#start()} and a message is received by a consumer. It is +a very common gotcha to forget to start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1).
    watchTopicAdvisoriesxs:boolean
    + + + + + + + + + + + + + + + +
    ElementTypeDescription
    blobTransferPolicy<spring:bean/>Sets the policy used to describe how out-of-band BLOBs (Binary Large +OBjects) are transferred from producers to brokers to consumers
    clientIdGenerator<spring:bean/>
    clientInternalExceptionListener<spring:bean/>Allows an {@link ClientInternalExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory)

    connectionIdGenerator<spring:bean/>
    exceptionListenercommandAgentAllows an {@link ExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory)

    prefetchPolicyprefetchPolicySets the prefetch +policy for consumers created by this connection.
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    redeliveryPolicyredeliveryPolicySets the global default redelivery policy to be used when a message is delivered +but the session is rolled back
    redeliveryPolicyMapredeliveryPolicyMapSets the global redelivery policy mapping to be used when a message is delivered +but the session is rolled back
    rejectedTaskHandler<spring:bean/>
    sessionTaskRunnertaskRunnerFactory
    transformer<spring:bean/>Sets the transformer used to transform messages before they are sent on +to the JMS bus or when they are received from the bus but before they are +delivered to the JMS client
    transportListener<spring:bean/>Allows a listener to be configured on the ConnectionFactory so that when this factory is used +with frameworks which don't expose the Connection such as Spring JmsTemplate, you can still register +a transport listener.
    +

    Element: constantPendingMessageLimitStrategy

    + + + +
    AttributeTypeDescription
    limitxs:integer
    +

    Element: database-locker

    + + + + + + +
    AttributeTypeDescription
    failIfLockedxs:boolean
    lockAcquireSleepIntervalxs:long
    namexs:string
    queryTimeoutxs:integer
    + + + +
    ElementTypeDescription
    exceptionHandler<spring:bean/>
    +

    Element: db2JDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: defaultIOExceptionHandler

    + + + + + + + + + +
    AttributeTypeDescription
    ignoreAllErrorsxs:boolean
    ignoreNoSpaceErrorsxs:boolean
    ignoreSQLExceptionsxs:boolean
    noSpaceMessagexs:string
    resumeCheckSleepPeriodxs:long
    sqlExceptionMessagexs:string
    stopStartConnectorsxs:boolean
    + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    +

    Element: defaultJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: defaultNetworkBridgeFilterFactory

    +

    Element: defaultUsageCapacity

    + + + +
    AttributeTypeDescription
    limitxs:long
    +

    Element: demandForwardingBridge

    + + + + +
    AttributeTypeDescription
    createdByDuplexxs:boolean
    mbeanObjectNamexs:string
    + + + + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    configurationldapNetworkConnector | multicastNetworkConnector | networkConnector
    durableDestinations(queue | tempQueue | tempTopic | topic)*
    dynamicallyIncludedDestinations(queue | tempQueue | tempTopic | topic)*
    excludedDestinations(queue | tempQueue | tempTopic | topic)*
    localBroker<spring:bean/>
    networkBridgeListener<spring:bean/>
    remoteBroker<spring:bean/>
    staticallyIncludedDestinations(queue | tempQueue | tempTopic | topic)*
    +

    Element: destinationDotFilePlugin

    + + + +
    AttributeTypeDescription
    filexs:stringSets the destination file name to create the destination diagram
    +

    Element: destinationEntry

    + + + + + + +
    AttributeTypeDescription
    queuexs:stringA helper method to set the destination from a configuration file
    tempQueuexs:boolean
    tempTopicxs:boolean
    topicxs:stringA helper method to set the destination from a configuration file
    + + + + +
    ElementTypeDescription
    destinationqueue | tempQueue | tempTopic | topic
    valueauthorizationEntry | destinationEntry | filteredKahaDB | policyEntry | redeliveryPolicy | tempDestinationAuthorizationEntry
    +

    Element: destinationPathSeparatorPlugin

    + + + +
    AttributeTypeDescription
    pathSeparatorxs:string
    + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: discardingDLQBrokerPlugin

    + + + + + + + +
    AttributeTypeDescription
    dropAllxs:boolean
    dropOnlyxs:string
    dropTemporaryQueuesxs:boolean
    dropTemporaryTopicsxs:boolean
    reportIntervalxs:integer
    +

    Element: fileCursor

    +

    Element: fileDurableSubscriberCursor

    +

    Element: fileQueueCursor

    +

    Element: filteredDestination

    + + + + + +
    AttributeTypeDescription
    queuexs:stringSets the destination property to the given queue name
    selectorxs:stringSets the JMS selector used to filter messages before forwarding them to this destination
    topicxs:stringSets the destination property to the given topic name
    + + + + +
    ElementTypeDescription
    destinationqueue | tempQueue | tempTopic | topicThe destination to send messages to if they match the filter
    filter<spring:bean/>
    +

    Element: filteredKahaDB

    + + + + + + + +
    AttributeTypeDescription
    perDestinationxs:boolean
    queuexs:stringA helper method to set the destination from a configuration file
    tempQueuexs:boolean
    tempTopicxs:boolean
    topicxs:stringA helper method to set the destination from a configuration file
    + + + + + +
    ElementTypeDescription
    adapterkahaDB
    destinationqueue | tempQueue | tempTopic | topic
    persistenceAdapterkahaDB
    +

    Element: fixedCountSubscriptionRecoveryPolicy

    + + + +
    AttributeTypeDescription
    maximumSizexs:integerSets the maximum number of messages that this destination will hold +around in RAM
    + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: fixedSizedSubscriptionRecoveryPolicy

    + + + + +
    AttributeTypeDescription
    maximumSizexs:integerSets the maximum amount of RAM in bytes that this buffer can hold in RAM
    useSharedBufferxs:boolean
    + + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    buffer<spring:bean/>
    +

    Element: forcePersistencyModeBroker

    + + + +
    AttributeTypeDescription
    persistenceFlagxs:boolean
    + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: forcePersistencyModeBrokerPlugin

    + + + +
    AttributeTypeDescription
    persistenceFlagxs:booleanSets the persistency mode.
    +

    Element: forwardingBridge

    + + + + + + + +
    AttributeTypeDescription
    clientIdxs:string
    destinationFilterxs:string
    dispatchAsyncxs:boolean
    prefetchSizexs:integer
    useCompressionxs:boolean
    + + + + + +
    ElementTypeDescription
    localBroker<spring:bean/>
    networkBridgeFailedListener<spring:bean/>
    remoteBroker<spring:bean/>
    +

    Element: hsqldb-jdbc-adapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: imageBasedJDBCAdaptor

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: inboundQueueBridge

    + + + + + + +
    AttributeTypeDescription
    doHandleReplyToxs:boolean
    inboundQueueNamexs:stringSets the queue name used for the inbound queue, if the outbound queue +name has not been set, then this method uses the same name to configure +the outbound queue name.
    localQueueNamexs:string
    selectorxs:string
    + + + + + + + + + +
    ElementTypeDescription
    consumer<spring:bean/>
    consumerConnection<spring:bean/>
    consumerQueuequeue
    jmsConnectorjmsQueueConnector | jmsTopicConnector
    jmsMessageConvertorsimpleJmsMessageConvertor
    producerConnection<spring:bean/>
    producerQueuequeue
    +

    Element: inboundTopicBridge

    + + + + + + + +
    AttributeTypeDescription
    consumerNamexs:string
    doHandleReplyToxs:boolean
    inboundTopicNamexs:stringSets the topic name used for the inbound topic, if the outbound topic +name has not been set, then this method uses the same name to configure +the outbound topic name.
    localTopicNamexs:string
    selectorxs:string
    + + + + + + + + + +
    ElementTypeDescription
    consumer<spring:bean/>
    consumerConnection<spring:bean/>
    consumerTopictopic
    jmsConnectorjmsQueueConnector | jmsTopicConnector
    jmsMessageConvertorsimpleJmsMessageConvertor
    producerConnection<spring:bean/>
    producerTopictopic
    +

    Element: individualDeadLetterStrategy

    + + + + + + + + + + + + +
    AttributeTypeDescription
    destinationPerDurableSubscriberxs:booleansets whether durable topic subscriptions are to get individual dead letter destinations. +When true, the DLQ is of the form 'topicPrefix.clientId:subscriptionName' +The default is false.
    enableAuditxs:boolean
    processExpiredxs:boolean
    processNonPersistentxs:boolean
    queuePrefixxs:stringSets the prefix to use for all dead letter queues for queue messages
    queueSuffixxs:stringSets the suffix to use for all dead letter queues for queue messages
    topicPrefixxs:stringSets the prefix to use for all dead letter queues for topic messages
    topicSuffixxs:stringSets the suffix to use for all dead letter queues for topic messages
    useQueueForQueueMessagesxs:booleanSets whether a queue or topic should be used for queue messages sent to a +DLQ. The default is to use a Queue
    useQueueForTopicMessagesxs:booleanSets whether a queue or topic should be used for topic messages sent to a +DLQ. The default is to use a Queue
    +

    Element: informixJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: jDBCIOExceptionHandler

    + + + + + + + + + +
    AttributeTypeDescription
    ignoreAllErrorsxs:boolean
    ignoreNoSpaceErrorsxs:boolean
    ignoreSQLExceptionsxs:boolean
    noSpaceMessagexs:string
    resumeCheckSleepPeriodxs:long
    sqlExceptionMessagexs:string
    stopStartConnectorsxs:boolean
    + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    +

    Element: jaasAuthenticationPlugin

    + + + + +
    AttributeTypeDescription
    configurationxs:stringSets the JAAS configuration domain name used
    discoverLoginConfigxs:booleanEnables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.
    +

    Element: jaasCertificateAuthenticationPlugin

    + + + + +
    AttributeTypeDescription
    configurationxs:stringSets the JAAS configuration domain name used
    discoverLoginConfigxs:booleanEnables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.
    +

    Element: jaasDualAuthenticationPlugin

    + + + + + +
    AttributeTypeDescription
    configurationxs:stringSets the JAAS configuration domain name used
    discoverLoginConfigxs:booleanEnables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.
    sslConfigurationxs:stringSet the JAAS SSL configuration domain
    +

    Element: jdbcPersistenceAdapter

    + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    auditRecoveryDepthxs:integer
    brokerNamexs:string
    cleanupPeriodxs:integerSets the number of milliseconds until the database is attempted to be +cleaned up for durable topics
    createTablesOnStartupxs:booleanSets whether or not tables are created on startup
    dataDirectoryxs:string
    dataDirectoryFilexs:string
    directoryxs:string
    enableAuditxs:boolean
    lockAcquireSleepIntervalxs:long
    lockKeepAlivePeriodxs:long
    maxAuditDepthxs:integer
    maxProducersToAuditxs:integer
    maxRowsxs:integer
    transactionIsolationxs:integerset the Transaction isolation level to something other that TRANSACTION_READ_UNCOMMITTED +This allowable dirty isolation level may not be achievable in clustered DB environments +so a more restrictive and expensive option may be needed like TRANSACTION_REPEATABLE_READ +see isolation level constants in {@link java.sql.Connection}
    useDatabaseLockxs:boolean
    useExternalMessageReferencesxs:boolean
    useLockxs:boolean
    + + + + + + + + + + + + + +
    ElementTypeDescription
    adapteraxionJDBCAdapter | blobJDBCAdapter | bytesJDBCAdapter | db2JDBCAdapter | defaultJDBCAdapter | hsqldb-jdbc-adapter | imageBasedJDBCAdaptor | informixJDBCAdapter | maxdb-jdbc-adapter | mysql-jdbc-adapter | oracleBlobJDBCAdapter | oracleJDBCAdapter | postgresql-jdbc-adapter | streamJDBCAdapter | sybase-jdbc-adapter | transact-jdbc-adapter
    brokerServicebroker | brokerService
    dataSource<spring:bean/>
    databaseLockerdatabase-locker | lease-database-locker | shared-file-locker | transact-database-lockerSets the database locker strategy to use to lock the database on startup
    ds<spring:bean/>
    lockDataSource<spring:bean/>
    lockerdatabase-locker | lease-database-locker | shared-file-locker | transact-database-locker
    scheduledThreadPoolExecutor<spring:bean/>
    statementsstatements
    usageManagersystemUsage
    wireFormat<spring:bean/>
    +

    Element: jmsQueueConnector

    + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    jndiLocalTemplatexs:string
    jndiOutboundTemplatexs:string
    localClientIdxs:string
    localConnectionFactoryNamexs:string
    localPasswordxs:string
    localUsernamexs:string
    namexs:string
    outboundClientIdxs:string
    outboundPasswordxs:string
    outboundQueueConnectionFactoryNamexs:string
    outboundUsernamexs:string
    preferJndiDestinationLookupxs:booleanSets whether the connector should prefer to first try to find a destination in JNDI before +using JMS semantics to create a Destination. By default the connector will first use JMS +semantics and then fall-back to JNDI lookup, setting this value to true will reverse that +ordering.
    replyToDestinationCacheSizexs:integer
    + + + + + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerServiceOne way to configure the local connection - this is called by The +BrokerService when the Connector is embedded
    inboundMessageConvertorsimpleJmsMessageConvertor
    inboundQueueBridges(inboundQueueBridge)*
    localQueueConnection<spring:bean/>
    localQueueConnectionFactoryconnectionFactory | xaConnectionFactory
    outboundMessageConvertorsimpleJmsMessageConvertor
    outboundQueueBridges(outboundQueueBridge)*
    outboundQueueConnection<spring:bean/>
    outboundQueueConnectionFactoryconnectionFactory | xaConnectionFactory
    reconnectionPolicyreconnectionPolicy
    +

    Element: jmsTopicConnector

    + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    jndiLocalTemplatexs:string
    jndiOutboundTemplatexs:string
    localClientIdxs:string
    localConnectionFactoryNamexs:string
    localPasswordxs:string
    localUsernamexs:string
    namexs:string
    outboundClientIdxs:string
    outboundPasswordxs:string
    outboundTopicConnectionFactoryNamexs:string
    outboundUsernamexs:string
    preferJndiDestinationLookupxs:booleanSets whether the connector should prefer to first try to find a destination in JNDI before +using JMS semantics to create a Destination. By default the connector will first use JMS +semantics and then fall-back to JNDI lookup, setting this value to true will reverse that +ordering.
    replyToDestinationCacheSizexs:integer
    + + + + + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerServiceOne way to configure the local connection - this is called by The +BrokerService when the Connector is embedded
    inboundMessageConvertorsimpleJmsMessageConvertor
    inboundTopicBridges(inboundTopicBridge)*
    localTopicConnection<spring:bean/>
    localTopicConnectionFactoryconnectionFactory | xaConnectionFactory
    outboundMessageConvertorsimpleJmsMessageConvertor
    outboundTopicBridges(outboundTopicBridge)*
    outboundTopicConnection<spring:bean/>
    outboundTopicConnectionFactoryconnectionFactory | xaConnectionFactory
    reconnectionPolicyreconnectionPolicy
    +

    Element: journalPersistenceAdapter

    + + + + + + + +
    AttributeTypeDescription
    brokerNamexs:string
    directoryxs:string
    maxCheckpointMessageAddSizexs:integer
    maxCheckpointWorkersxs:integer
    useExternalMessageReferencesxs:boolean
    + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    journal<spring:bean/>
    longTermPersistenceamqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapter
    persistenceAdapteramqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapter
    taskRunnerFactorytaskRunnerFactory
    usageManagersystemUsage
    +

    Element: journalPersistenceAdapterFactory

    + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    createTablesOnStartupxs:booleanSets whether or not tables are created on startup
    dataDirectoryxs:string
    dataDirectoryFilexs:string
    journalArchiveDirectoryxs:string
    journalLogFileSizexs:stringSets the size of the journal log files +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    journalLogFilesxs:integerSets the number of journal log files to use
    journalThreadPriorityxs:integerSets the thread priority of the journal thread
    lockKeepAlivePeriodxs:long
    useDatabaseLockxs:booleanSets whether or not an exclusive database lock should be used to enable +JDBC Master/Slave. Enabled by default.
    useDedicatedTaskRunnerxs:boolean
    useJournalxs:booleanEnables or disables the use of the journal. The default is to use the +journal
    useLockxs:boolean
    useQuickJournalxs:booleanEnables or disables the use of quick journal, which keeps messages in the +journal and just stores a reference to the messages in JDBC. Defaults to +false so that messages actually reside long term in the JDBC database.
    + + + + + + + + + + +
    ElementTypeDescription
    adapteraxionJDBCAdapter | blobJDBCAdapter | bytesJDBCAdapter | db2JDBCAdapter | defaultJDBCAdapter | hsqldb-jdbc-adapter | imageBasedJDBCAdaptor | informixJDBCAdapter | maxdb-jdbc-adapter | mysql-jdbc-adapter | oracleBlobJDBCAdapter | oracleJDBCAdapter | postgresql-jdbc-adapter | streamJDBCAdapter | sybase-jdbc-adapter | transact-jdbc-adapter
    brokerServicebroker | brokerService
    dataSource<spring:bean/>
    jdbcAdapterjdbcPersistenceAdapter
    journal<spring:bean/>
    lockerdatabase-locker | lease-database-locker | shared-file-locker | transact-database-locker
    statementsstatements
    taskRunnerFactorytaskRunnerFactory
    +

    Element: journaledJDBC

    + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    createTablesOnStartupxs:booleanSets whether or not tables are created on startup
    dataDirectoryxs:string
    dataDirectoryFilexs:string
    journalArchiveDirectoryxs:string
    journalLogFileSizexs:stringSets the size of the journal log files +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    journalLogFilesxs:integerSets the number of journal log files to use
    journalThreadPriorityxs:integerSets the thread priority of the journal thread
    lockKeepAlivePeriodxs:long
    useDatabaseLockxs:booleanSets whether or not an exclusive database lock should be used to enable +JDBC Master/Slave. Enabled by default.
    useDedicatedTaskRunnerxs:boolean
    useJournalxs:booleanEnables or disables the use of the journal. The default is to use the +journal
    useLockxs:boolean
    useQuickJournalxs:booleanEnables or disables the use of quick journal, which keeps messages in the +journal and just stores a reference to the messages in JDBC. Defaults to +false so that messages actually reside long term in the JDBC database.
    + + + + + + + + + + +
    ElementTypeDescription
    adapteraxionJDBCAdapter | blobJDBCAdapter | bytesJDBCAdapter | db2JDBCAdapter | defaultJDBCAdapter | hsqldb-jdbc-adapter | imageBasedJDBCAdaptor | informixJDBCAdapter | maxdb-jdbc-adapter | mysql-jdbc-adapter | oracleBlobJDBCAdapter | oracleJDBCAdapter | postgresql-jdbc-adapter | streamJDBCAdapter | sybase-jdbc-adapter | transact-jdbc-adapter
    brokerServicebroker | brokerService
    dataSource<spring:bean/>
    jdbcAdapterjdbcPersistenceAdapter
    journal<spring:bean/>
    lockerdatabase-locker | lease-database-locker | shared-file-locker | transact-database-locker
    statementsstatements
    taskRunnerFactorytaskRunnerFactory
    +

    Element: kahaDB

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    archiveCorruptedIndexxs:boolean
    archiveDataLogsxs:boolean
    brokerNamexs:string
    checkForCorruptJournalFilesxs:boolean
    checkpointIntervalxs:longGet the checkpointInterval
    checksumJournalFilesxs:boolean
    cleanupIntervalxs:longGet the cleanupInterval
    concurrentStoreAndDispatchQueuesxs:boolean
    concurrentStoreAndDispatchTopicsxs:boolean
    databaseLockedWaitDelayxs:integer
    directoryxs:stringGet the directory
    directoryArchivexs:string
    enableIndexDiskSyncsxs:boolean
    enableIndexPageCachingxs:boolean
    enableIndexRecoveryFilexs:boolean
    enableIndexWriteAsyncxs:booleanGet the enableIndexWriteAsync
    enableJournalDiskSyncsxs:booleanGet the enableJournalDiskSyncs
    failoverProducersAuditDepthxs:integerset the audit window depth for duplicate suppression (should exceed the max transaction +batch)
    forceRecoverIndexxs:boolean
    ignoreMissingJournalfilesxs:booleanGet the ignoreMissingJournalfiles
    indexCacheSizexs:stringGet the indexCacheSize
    indexLFUEvictionFactorxs:float
    indexWriteBatchSizexs:stringGet the indexWriteBatchSize
    journalMaxFileLengthxs:stringGet the journalMaxFileLength
    journalMaxWriteBatchSizexs:stringGet the journalMaxWriteBatchSize
    lockKeepAlivePeriodxs:long
    maxAsyncJobsxs:integer
    maxFailoverProducersToTrackxs:integerSet the max number of producers (LRU cache) to track for duplicate sends
    rewriteOnRedeliveryxs:booleanWhen true, persist the redelivery status such that the message redelivery flag can survive a broker failure +used with org.apache.activemq.ActiveMQConnectionFactory#setTransactedIndividualAck(boolean) true
    useIndexLFRUEvictionxs:boolean
    useLockxs:boolean
    + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    lockerdatabase-locker | lease-database-locker | shared-file-locker | transact-database-locker
    usageManagersystemUsage
    +

    Element: kahaPersistenceAdapter

    + + + + + + +
    AttributeTypeDescription
    brokerNamexs:string
    directoryxs:string
    maxDataFileLengthxs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    persistentIndexxs:boolean
    + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    size<spring:bean/>
    usageManagersystemUsage
    +

    Element: lDAPAuthorizationMap

    + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    adminAttributexs:string
    adminBasexs:string
    advisorySearchBasexs:string
    authenticationxs:string
    connectionPasswordxs:string
    connectionProtocolxs:string
    connectionURLxs:string
    connectionUsernamexs:string
    initialContextFactoryxs:string
    queueSearchSubtreeBoolxs:boolean
    readAttributexs:string
    readBasexs:string
    tempSearchBasexs:string
    topicSearchSubtreeBoolxs:boolean
    useAdvisorySearchBasexs:boolean
    writeAttributexs:string
    writeBasexs:string
    + + + + + + +
    ElementTypeDescription
    context<spring:bean/>
    options<spring:bean/>
    queueSearchMatchingFormat<spring:bean/>
    topicSearchMatchingFormat<spring:bean/>
    +

    Element: lastImageSubscriptionRecoveryPolicy

    + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: ldapNetworkConnector

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    alwaysSyncSendxs:boolean
    anonymousAuthenticationxs:booleansets LDAP anonymous authentication access credentials
    basexs:stringsets the base LDAP dn used for lookup operations
    bridgeTempDestinationsxs:boolean
    brokerNamexs:string
    brokerURLxs:string
    conduitSubscriptionsxs:boolean
    consumerPriorityBasexs:integer
    decreaseNetworkConsumerPriorityxs:boolean
    destinationFilterxs:string
    dispatchAsyncxs:boolean
    duplexxs:boolean
    dynamicOnlyxs:boolean
    localUrixs:string
    namexs:string
    networkTTLxs:integer
    objectNamexs:string
    passwordxs:stringsets the LDAP password for access credentials
    prefetchSizexs:string
    searchEventListenerxs:booleanenables/disable a persistent search to the LDAP server as defined +in draft-ietf-ldapext-psearch-03.txt (2.16.840.1.113730.3.4.3)
    searchFilterxs:stringsets the LDAP search filter as defined in RFC 2254
    searchScopexs:stringsets the LDAP search scope
    staticBridgexs:boolean
    suppressDuplicateQueueSubscriptionsxs:boolean
    suppressDuplicateTopicSubscriptionsxs:boolean
    urixs:stringreturns the next URI from the configured list
    useCompressionxs:boolean
    userxs:stringsets the LDAP user for access credentials
    userNamexs:string
    + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    connectionFilter<spring:bean/>
    durableDestinations(<spring:bean/>)*
    dynamicallyIncludedDestinations(<spring:bean/>)*
    excludedDestinations(<spring:bean/>)*
    staticallyIncludedDestinations(<spring:bean/>)*
    +

    Element: lease-database-locker

    + + + + + + + + +
    AttributeTypeDescription
    failIfLockedxs:boolean
    leaseHolderIdxs:string
    lockAcquireSleepIntervalxs:long
    maxAllowableDiffFromDBTimexs:integer
    namexs:string
    queryTimeoutxs:integer
    +

    Element: levelDB

    + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    asyncBufferSizexs:integer
    brokerNamexs:string
    directoryxs:string
    failIfLockedxs:boolean
    flushDelayxs:integer
    indexBlockRestartIntervalxs:integer
    indexBlockSizexs:integer
    indexCacheSizexs:long
    indexCompressionxs:string
    indexFactoryxs:string
    indexMaxOpenFilesxs:integer
    indexWriteBufferSizexs:integer
    logCompressionxs:string
    logDirectoryxs:string
    logSizexs:long
    monitorStatsxs:boolean
    paranoidChecksxs:boolean
    syncxs:boolean
    verifyChecksumsxs:boolean
    + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    usageManagersystemUsage
    +

    Element: loggingBrokerPlugin

    + + + + + + + + + + +
    AttributeTypeDescription
    logAllxs:booleanLogger all Events that go through the Plugin
    logConnectionEventsxs:booleanLogger Events that are related to connections
    logConsumerEventsxs:booleanLogger Events that are related to Consumers
    logInternalEventsxs:booleanLogger Events that are normally internal to the broker
    logMessageEventsxs:booleanLogger Events that are related to message processing
    logProducerEventsxs:booleanLogger Events that are related to Producers
    logSessionEventsxs:booleanLogger Events that are related to sessions
    logTransactionEventsxs:booleanLogger Events that are related to transaction processing
    + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: mKahaDB

    + + + + + + +
    AttributeTypeDescription
    brokerNamexs:string
    directoryxs:string
    journalMaxFileLengthxs:stringSet the max file length of the transaction journal +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can +be used
    journalWriteBatchSizexs:stringSet the max write batch size of the transaction journal +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can +be used
    + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    entries(<spring:bean/>)*A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring
    filteredPersistenceAdapters(<spring:bean/>)*Sets the FilteredKahaDBPersistenceAdapter entries
    transactionStore<spring:bean/>
    usageManagersystemUsage
    +

    Element: managementContext

    + + + + + + + + + + + + + +
    AttributeTypeDescription
    allowRemoteAddressInMBeanNamesxs:boolean
    brokerNamexs:stringGets the broker name this context is used by, may be null +if the broker name was not set.
    connectorHostxs:stringGet the connectorHost
    connectorPathxs:string
    connectorPortxs:string
    createConnectorxs:string
    createMBeanServerxs:boolean
    findTigerMbeanServerxs:booleanEnables/disables the searching for the Java 5 platform MBeanServer
    jmxDomainNamexs:string
    rmiServerPortxs:string
    useMBeanServerxs:boolean
    + + + + + +
    ElementTypeDescription
    MBeanServer<spring:bean/>Get the MBeanServer
    environment<spring:bean/>
    server<spring:bean/>
    +

    Element: masterConnector

    + + + + + + + + +
    AttributeTypeDescription
    failedToStartxs:booleanGet the failedToStart
    localURIxs:string
    passwordxs:string
    remoteURIxs:string
    remoteUrixs:string
    userNamexs:string
    + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    +

    Element: maxdb-jdbc-adapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: memoryPersistenceAdapter

    + + + + + + +
    AttributeTypeDescription
    brokerNamexs:string
    createTransactionStorexs:boolean
    directoryxs:string
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    usageManagersystemUsage
    +

    Element: memoryUsage

    + + + + + + + + + + +
    AttributeTypeDescription
    limitxs:stringSets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    namexs:string
    percentUsagexs:integer
    percentUsageMinDeltaxs:stringSets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.
    pollingTimexs:integer
    portionxs:float
    usagexs:long
    usagePortionxs:float
    + + + + + +
    ElementTypeDescription
    executor<spring:bean/>
    limiterdefaultUsageCapacity | usageCapacity
    parent<spring:bean/>
    +

    Element: messageGroupHashBucketFactory

    + + + +
    AttributeTypeDescription
    bucketCountxs:integerSets the number of hash buckets to use for the message group +functionality. This is only applicable to using message groups to +parallelize processing of a queue while preserving order across an +individual JMSXGroupID header value. This value sets the number of hash +buckets that will be used (i.e. the maximum possible concurrency).
    +

    Element: mirroredQueue

    + + + + + +
    AttributeTypeDescription
    copyMessagexs:booleanSets whether a copy of the message will be sent to each destination. +Defaults to true so that the forward destination is set as the +destination of the message
    postfixxs:stringSets any postfix used to identify the queue consumers
    prefixxs:stringSets the prefix wildcard used to identify the queue consumers for a given +topic
    + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    +

    Element: multicastNetworkConnector

    + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    alwaysSyncSendxs:boolean
    bridgeTempDestinationsxs:boolean
    brokerNamexs:string
    brokerURLxs:string
    conduitSubscriptionsxs:boolean
    consumerPriorityBasexs:integer
    decreaseNetworkConsumerPriorityxs:boolean
    destinationFilterxs:string
    dispatchAsyncxs:boolean
    duplexxs:boolean
    dynamicOnlyxs:boolean
    localUrixs:string
    namexs:string
    networkTTLxs:integer
    objectNamexs:string
    passwordxs:string
    prefetchSizexs:string
    remoteURIxs:stringSets the remote transport URI to some group transport like +multicast://address:port
    staticBridgexs:boolean
    suppressDuplicateQueueSubscriptionsxs:boolean
    suppressDuplicateTopicSubscriptionsxs:boolean
    useCompressionxs:boolean
    userNamexs:string
    + + + + + + + + + + + +
    ElementTypeDescription
    bridgecompositeDemandForwardingBridge | demandForwardingBridge
    brokerServicebroker | brokerService
    connectionFilter<spring:bean/>
    durableDestinations(<spring:bean/>)*
    dynamicallyIncludedDestinations(<spring:bean/>)*
    excludedDestinations(<spring:bean/>)*
    localTransport<spring:bean/>
    remoteTransport<spring:bean/>Sets the remote transport implementation
    staticallyIncludedDestinations(<spring:bean/>)*
    +

    Element: multicastTraceBrokerPlugin

    + + + + + + +
    AttributeTypeDescription
    broadcastxs:boolean
    destinationxs:string
    maxTraceDatagramSizexs:integer
    timeToLivexs:integer
    + + + + + + + +
    ElementTypeDescription
    address<spring:bean/>
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    wireFormat<spring:bean/>
    wireFormatFactory<spring:bean/>
    +

    Element: mysql-jdbc-adapter

    + + + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    engineTypexs:string
    maxRowsxs:integer
    typeStatementxs:string
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: networkConnector

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    alwaysSyncSendxs:boolean
    bridgeTempDestinationsxs:boolean
    brokerNamexs:string
    brokerURLxs:string
    conduitSubscriptionsxs:boolean
    consumerPriorityBasexs:integer
    decreaseNetworkConsumerPriorityxs:boolean
    destinationFilterxs:string
    discoveryURIxs:string
    dispatchAsyncxs:boolean
    duplexxs:boolean
    dynamicOnlyxs:boolean
    localUrixs:string
    namexs:string
    networkTTLxs:integer
    objectNamexs:string
    passwordxs:string
    prefetchSizexs:string
    staticBridgexs:boolean
    suppressDuplicateQueueSubscriptionsxs:boolean
    suppressDuplicateTopicSubscriptionsxs:boolean
    urixs:string
    useCompressionxs:boolean
    userNamexs:string
    + + + + + + + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    connectionFilter<spring:bean/>
    discoveryAgent<spring:bean/>
    durableDestinations(<spring:bean/>)*
    dynamicallyIncludedDestinations(<spring:bean/>)*
    excludedDestinations(<spring:bean/>)*
    staticallyIncludedDestinations(<spring:bean/>)*
    +

    Element: noSubscriptionRecoveryPolicy

    + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: oldestMessageEvictionStrategy

    + + + +
    AttributeTypeDescription
    evictExpiredMessagesHighWatermarkxs:integerSets the high water mark on which we will eagerly evict expired messages from RAM
    +

    Element: oldestMessageWithLowestPriorityEvictionStrategy

    + + + +
    AttributeTypeDescription
    evictExpiredMessagesHighWatermarkxs:integerSets the high water mark on which we will eagerly evict expired messages from RAM
    +

    Element: oracleBlobJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: oracleJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: outboundQueueBridge

    + + + + + + +
    AttributeTypeDescription
    doHandleReplyToxs:boolean
    localQueueNamexs:string
    outboundQueueNamexs:stringSets the name of the outbound queue name. If the inbound queue name +has not been set already then this method uses the provided queue name +to set the inbound queue name as well.
    selectorxs:string
    + + + + + + + + + +
    ElementTypeDescription
    consumer<spring:bean/>
    consumerConnection<spring:bean/>
    consumerQueuequeue
    jmsConnectorjmsQueueConnector | jmsTopicConnector
    jmsMessageConvertorsimpleJmsMessageConvertor
    producerConnection<spring:bean/>
    producerQueuequeue
    +

    Element: outboundTopicBridge

    + + + + + + + +
    AttributeTypeDescription
    consumerNamexs:string
    doHandleReplyToxs:boolean
    localTopicNamexs:string
    outboundTopicNamexs:stringSets the name of the outbound topic name. If the inbound topic name +has not been set already then this method uses the provided topic name +to set the inbound topic name as well.
    selectorxs:string
    + + + + + + + + + +
    ElementTypeDescription
    consumer<spring:bean/>
    consumerConnection<spring:bean/>
    consumerTopictopic
    jmsConnectorjmsQueueConnector | jmsTopicConnector
    jmsMessageConvertorsimpleJmsMessageConvertor
    producerConnection<spring:bean/>
    producerTopictopic
    +

    Element: pListStore

    + + + + + + + + + + + + + +
    AttributeTypeDescription
    cleanupIntervalxs:long
    directoryxs:string
    enableIndexWriteAsyncxs:boolean
    failIfDatabaseIsLockedxs:boolean
    indexCacheSizexs:integer
    indexEnablePageCachingxs:boolean
    indexPageSizexs:integer
    indexWriteBatchSizexs:integer
    journalMaxFileLengthxs:integer
    journalMaxWriteBatchSizexs:integer
    lazyInitxs:boolean
    + + + +
    ElementTypeDescription
    brokerServicebroker | brokerService
    +

    Element: policyEntry

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    advisoryForConsumedxs:boolean
    advisoryForDeliveryxs:boolean
    advisoryForDiscardingMessagesxs:boolean
    advisoryForFastProducersxs:boolean
    advisoryForSlowConsumersxs:boolean
    advisoryWhenFullxs:boolean
    allConsumersExclusiveByDefaultxs:boolean
    alwaysRetroactivexs:boolean
    blockedProducerWarningIntervalxs:longSet's the interval at which warnings about producers being blocked by +resource usage will be triggered. Values of 0 or less will disable +warnings
    consumersBeforeDispatchStartsxs:integer
    cursorMemoryHighWaterMarkxs:integer
    doOptimzeMessageStoragexs:boolean
    durableTopicPrefetchxs:integerGet the durableTopicPrefetch
    enableAuditxs:boolean
    expireMessagesPeriodxs:long
    gcInactiveDestinationsxs:boolean
    gcWithNetworkConsumersxs:boolean
    inactiveTimoutBeforeGCxs:long
    lazyDispatchxs:boolean
    maxAuditDepthxs:integer
    maxBrowsePageSizexs:integer
    maxExpirePageSizexs:integer
    maxPageSizexs:integer
    maxProducersToAuditxs:integer
    maxQueueAuditDepthxs:integer
    memoryLimitxs:stringWhen set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    minimumMessageSizexs:long
    optimizeMessageStoreInFlightLimitxs:integer
    optimizedDispatchxs:boolean
    prioritizedMessagesxs:boolean
    producerFlowControlxs:boolean
    queuexs:stringA helper method to set the destination from a configuration file
    queueBrowserPrefetchxs:integerGet the queueBrowserPrefetch
    queuePrefetchxs:integerGet the queuePrefetch
    reduceMemoryFootprintxs:boolean
    sendAdvisoryIfNoConsumersxs:booleanSends an advisory message if a non-persistent message is sent and there +are no active consumers
    storeUsageHighWaterMarkxs:integer
    strictOrderDispatchxs:boolean
    tempQueuexs:boolean
    tempTopicxs:boolean
    timeBeforeDispatchStartsxs:integer
    topicxs:stringA helper method to set the destination from a configuration file
    topicPrefetchxs:integerGet the topicPrefetch
    useCachexs:boolean
    useConsumerPriorityxs:boolean
    usePrefetchExtensionxs:boolean
    + + + + + + + + + + + + + + +
    ElementTypeDescription
    deadLetterStrategyindividualDeadLetterStrategy | sharedDeadLetterStrategySets the policy used to determine which dead letter queue destination +should be used
    destinationqueue | tempQueue | tempTopic | topic
    dispatchPolicypriorityNetworkDispatchPolicy | roundRobinDispatchPolicy | simpleDispatchPolicy | strictOrderDispatchPolicy
    messageEvictionStrategyoldestMessageEvictionStrategy | oldestMessageWithLowestPriorityEvictionStrategy | uniquePropertyMessageEvictionStrategySets the eviction strategy used to decide which message to evict when the +slow consumer needs to discard messages
    messageGroupMapFactorymessageGroupHashBucketFactory | simpleMessageGroupMapFactorySets the factory used to create new instances of {MessageGroupMap} used +to implement the Message Groups +functionality.
    networkBridgeFilterFactoryconditionalNetworkBridgeFilterFactory | defaultNetworkBridgeFilterFactory
    pendingDurableSubscriberPolicyfileDurableSubscriberCursor | storeDurableSubscriberCursor | vmDurableCursor
    pendingMessageLimitStrategyconstantPendingMessageLimitStrategy | prefetchRatePendingMessageLimitStrategySets the strategy to calculate the maximum number of messages that are +allowed to be pending on consumers (in addition to their prefetch sizes). +Once the limit is reached, non-durable topics can then start discarding +old messages. This allows us to keep dispatching messages to slow +consumers while not blocking fast consumers and discarding the messages +oldest first.
    pendingQueuePolicyfileQueueCursor | storeCursor | vmQueueCursor
    pendingSubscriberPolicyfileCursor | vmCursor
    slowConsumerStrategyabortSlowConsumerStrategy
    subscriptionRecoveryPolicyfixedCountSubscriptionRecoveryPolicy | fixedSizedSubscriptionRecoveryPolicy | lastImageSubscriptionRecoveryPolicy | noSubscriptionRecoveryPolicy | queryBasedSubscriptionRecoveryPolicy | timedSubscriptionRecoveryPolicy
    +

    Element: policyMap

    + + + + + +
    ElementTypeDescription
    defaultEntrypolicyEntry
    entries(<spring:bean/>)*A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring
    policyEntries(<spring:bean/>)*Sets the individual entries on the policy map
    +

    Element: postgresql-jdbc-adapter

    + + + + + + +
    AttributeTypeDescription
    acksPkNamexs:string
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: prefetchPolicy

    + + + + + + + + + + +
    AttributeTypeDescription
    allxs:integer
    durableTopicPrefetchxs:integer
    inputStreamPrefetchxs:integer
    maximumPendingMessageLimitxs:integerSets how many messages a broker will keep around, above the prefetch +limit, for non-durable topics before starting to discard older messages.
    optimizeDurableTopicPrefetchxs:integer
    queueBrowserPrefetchxs:integer
    queuePrefetchxs:integer
    topicPrefetchxs:integer
    +

    Element: prefetchRatePendingMessageLimitStrategy

    + + + +
    AttributeTypeDescription
    multiplierxs:doubleSets the multiplier of the prefetch size which will be used to define the maximum number of pending +messages for non-durable topics before messages are discarded.
    +

    Element: priorityNetworkDispatchPolicy

    +

    Element: proxyConnector

    + + + + + + + +
    AttributeTypeDescription
    bindxs:string
    localUrixs:string
    namexs:string
    proxyToLocalBrokerxs:boolean
    remotexs:string
    + + + +
    ElementTypeDescription
    server<spring:bean/>
    +

    Element: queryBasedSubscriptionRecoveryPolicy

    + + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    query<spring:bean/>Sets the query strategy to load initial messages
    +

    Element: queue

    + + + + +
    AttributeTypeDescription
    namexs:string
    physicalNamexs:string
    + + + + +
    ElementTypeDescription
    compositeDestinations(queue | tempQueue | tempTopic | topic)*
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    +

    Element: queueDispatchSelector

    + + + + +
    ElementTypeDescription
    destinationqueue | tempQueue | tempTopic | topic
    exclusiveConsumer<spring:bean/>
    +

    Element: reconnectionPolicy

    + + + + + + + + + + +
    AttributeTypeDescription
    backOffMultiplierxs:doubleGets the multiplier used to grow the delay between connection attempts from the initial +time to the max set time. By default this value is set to 2.0.
    initialReconnectDelayxs:longGets the initial delay value used before a reconnection attempt is made. If the +use exponential back-off value is set to false then this will be the fixed time +between connection attempts. By default this value is set to one second.
    maxInitialConnectAttemptsxs:integerGets the maximum number of times that the {@link JmsConnector} will try +to connect on startup before it marks itself as failed and does not +try any further connections.
    maxReconnectAttemptsxs:integerGets the number of time that {@link JmsConnector} will attempt to connect +or reconnect before giving up. By default the policy sets this value to +a negative value meaning try forever.
    maxSendRetriesxs:integerGets the maximum number of times a Message send should be retried before +a JMSException is thrown indicating that the operation failed.
    maximumReconnectDelayxs:longGets the maximum delay that is inserted between each attempt to connect +before another attempt is made. The default setting for this value is +30 seconds.
    sendRetyDelayxs:longSet the amount of time the DestinationBridge will wait between attempts +to forward a message. The default policy limits the minimum time between +send attempts to one second.
    useExponentialBackOffxs:booleanGets whether the policy uses the set back-off multiplier to grow the time between +connection attempts.
    +

    Element: redeliveryPlugin

    + + + + +
    AttributeTypeDescription
    fallbackToDeadLetterxs:booleanWhat to do if there is no matching redelivery policy for a destination. +when true, the region broker DLQ processing will be used via sendToDeadLetterQueue +when false, there is no action
    sendToDlqIfMaxRetriesExceededxs:booleanWhat to do if the maxretries on a matching redelivery policy is exceeded. +when true, the region broker DLQ processing will be used via sendToDeadLetterQueue +when false, there is no action
    + + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    redeliveryPolicyMapredeliveryPolicyMap
    +

    Element: redeliveryPolicy

    + + + + + + + + + + + + + + +
    AttributeTypeDescription
    backOffMultiplierxs:double
    collisionAvoidancePercentxs:short
    initialRedeliveryDelayxs:long
    maximumRedeliveriesxs:integer
    maximumRedeliveryDelayxs:long
    queuexs:stringA helper method to set the destination from a configuration file
    redeliveryDelayxs:long
    tempQueuexs:boolean
    tempTopicxs:boolean
    topicxs:stringA helper method to set the destination from a configuration file
    useCollisionAvoidancexs:boolean
    useExponentialBackOffxs:boolean
    + + + +
    ElementTypeDescription
    destinationqueue | tempQueue | tempTopic | topic
    +

    Element: redeliveryPolicyMap

    + + + + + +
    ElementTypeDescription
    defaultEntryredeliveryPolicy
    entries(<spring:bean/>)*A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring
    redeliveryPolicyEntries(<spring:bean/>)*Sets the individual entries on the redeliveryPolicyMap
    +

    Element: roundRobinDispatchPolicy

    +

    Element: shared-file-locker

    + + + + + + +
    AttributeTypeDescription
    directoryxs:string
    failIfLockedxs:boolean
    lockAcquireSleepIntervalxs:long
    namexs:string
    +

    Element: sharedDeadLetterStrategy

    + + + + + +
    AttributeTypeDescription
    enableAuditxs:boolean
    processExpiredxs:boolean
    processNonPersistentxs:boolean
    + + + +
    ElementTypeDescription
    deadLetterQueuequeue | tempQueue | tempTopic | topic
    +

    Element: simpleAuthenticationPlugin

    + + + + + +
    AttributeTypeDescription
    anonymousAccessAllowedxs:boolean
    anonymousGroupxs:string
    anonymousUserxs:string
    + + + + + +
    ElementTypeDescription
    userGroups<spring:bean/>Sets the groups a user is in. The key is the user name and the value is a +Set of groups
    userPasswords<spring:bean/>Sets the map indexed by user name with the value the password
    users(<spring:bean/>)*Sets individual users for authentication
    +

    Element: simpleAuthorizationMap

    + + + + + + +
    ElementTypeDescription
    adminACLsauthorizationMap | cachedLDAPAuthorizationMap | mKahaDB | policyMap | redeliveryPolicyMap
    readACLsauthorizationMap | cachedLDAPAuthorizationMap | mKahaDB | policyMap | redeliveryPolicyMap
    tempDestinationAuthorizationEntrytempDestinationAuthorizationEntry
    writeACLsauthorizationMap | cachedLDAPAuthorizationMap | mKahaDB | policyMap | redeliveryPolicyMap
    +

    Element: simpleDispatchPolicy

    +

    Element: simpleDispatchSelector

    + + + +
    ElementTypeDescription
    destinationqueue | tempQueue | tempTopic | topic
    +

    Element: simpleJmsMessageConvertor

    + + + +
    ElementTypeDescription
    connection<spring:bean/>
    +

    Element: simpleMessageGroupMapFactory

    +

    Element: sslContext

    + + + + + + + + + + + + + + +
    AttributeTypeDescription
    keyStorexs:string
    keyStoreAlgorithmxs:string
    keyStoreKeyPasswordxs:string
    keyStorePasswordxs:string
    keyStoreTypexs:string
    protocolxs:string
    providerxs:string
    secureRandomAlgorithmxs:string
    trustStorexs:string
    trustStoreAlgorithmxs:string
    trustStorePasswordxs:string
    trustStoreTypexs:string
    + + + + + + +
    ElementTypeDescription
    SSLContext<spring:bean/>
    keyManagers(<spring:bean/>)*
    secureRandom<spring:bean/>
    trustManagers(<spring:bean/>)*
    +

    Element: statements

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    addMessageStatementxs:string
    binaryDataTypexs:string
    clearDurableLastAckInTxStatementxs:string
    clearXidFlagStatementxs:string
    containerNameDataTypexs:string
    createDurableSubStatementxs:string
    currentDateTimeStatementxs:string
    deleteOldMessagesStatementWithPriorityxs:string
    deleteSubscriptionStatementxs:string
    destinationMessageCountStatementxs:string
    dropAckPKAlterStatementEndxs:string
    durableSubAcksTableNamexs:string
    durableSubscriberMessageCountStatementxs:string
    durableSubscriberMessageCountStatementWithPriorityxs:string
    findAcksPendingOutcomeStatementxs:string
    findAllDestinationsStatementxs:string
    findAllDurableSubMessagesStatementxs:string
    findAllDurableSubsStatementxs:string
    findAllMessagesStatementxs:string
    findDurableSubMessagesStatementxs:string
    findDurableSubStatementxs:string
    findLastSequenceIdInAcksStatementxs:string
    findLastSequenceIdInMsgsStatementxs:string
    findMessageByIdStatementxs:string
    findMessageSequenceIdStatementxs:string
    findMessageStatementxs:string
    findNextMessagesStatementxs:string
    findOpsPendingOutcomeStatementxs:string
    findXidByIdStatementxs:string
    insertDurablePriorityAckStatementxs:string
    lastAckedDurableSubscriberMessageStatementxs:string
    lastProducerSequenceIdStatementxs:string
    leaseObtainStatementxs:string
    leaseOwnerStatementxs:string
    leaseUpdateStatementxs:string
    lockCreateStatementxs:string
    lockTableNamexs:string
    lockUpdateStatementxs:string
    longDataTypexs:string
    messageTableNamexs:string
    msgIdDataTypexs:string
    nextDurableSubscriberMessageStatementxs:string
    removeAllMessagesStatementxs:string
    removeAllSubscriptionsStatementxs:string
    removeMessageStatmentxs:string
    selectDurablePriorityAckStatementxs:string
    sequenceDataTypexs:string
    stringIdDataTypexs:string
    tablePrefixxs:string
    updateDurableLastAckInTxStatementxs:string
    updateDurableLastAckStatementxs:string
    updateDurableLastAckWithPriorityInTxStatementxs:string
    updateDurableLastAckWithPriorityStatementxs:string
    updateLastPriorityAckRowOfDurableSubStatementxs:string
    updateMessageStatementxs:string
    updateXidFlagStatementxs:string
    useExternalMessageReferencesxs:boolean
    useLockCreateWhereClausexs:boolean
    + + + + +
    ElementTypeDescription
    createSchemaStatements(<spring:bean/>)*
    dropSchemaStatements(<spring:bean/>)*
    +

    Element: statisticsBrokerPlugin

    +

    Element: storeCursor

    +

    Element: storeDurableSubscriberCursor

    + + + + +
    AttributeTypeDescription
    immediatePriorityDispatchxs:booleanEnsure that new higher priority messages will get an immediate dispatch +rather than wait for the end of the current cursor batch. +Useful when there is a large message backlog and intermittent high priority messages.
    useCachexs:boolean
    +

    Element: storeUsage

    + + + + + + + + +
    AttributeTypeDescription
    limitxs:stringSets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    namexs:string
    percentUsagexs:integer
    percentUsageMinDeltaxs:stringSets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.
    pollingTimexs:integer
    usagePortionxs:float
    + + + + + + +
    ElementTypeDescription
    executor<spring:bean/>
    limiterdefaultUsageCapacity | usageCapacity
    parent<spring:bean/>
    storeamqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapter
    +

    Element: streamJDBCAdapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: strictOrderDispatchPolicy

    +

    Element: sybase-jdbc-adapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: systemUsage

    + + + + + + +
    AttributeTypeDescription
    namexs:string
    sendFailIfNoSpacexs:booleanSets whether or not a send() should fail if there is no space free. The +default value is false which means to block the send() method until space +becomes available
    sendFailIfNoSpaceAfterTimeoutxs:long
    sendFailIfNoSpaceExplicitySetxs:boolean
    + + + + + + + + + +
    ElementTypeDescription
    adapteramqPersistenceAdapter | jdbcPersistenceAdapter | journalPersistenceAdapter | kahaDB | kahaPersistenceAdapter | levelDB | mKahaDB | memoryPersistenceAdapter
    executor<spring:bean/>
    memoryUsagememoryUsage
    parentsystemUsage
    storeUsagestoreUsage
    tempStorepListStore
    tempUsagetempUsage
    +

    Element: taskRunnerFactory

    + + + + + + + + + +
    AttributeTypeDescription
    daemonxs:boolean
    dedicatedTaskRunnerxs:boolean
    maxIterationsPerRunxs:integer
    maxThreadPoolSizexs:integer
    namexs:string
    priorityxs:integer
    shutdownAwaitTerminationxs:long
    + + + + +
    ElementTypeDescription
    executor<spring:bean/>
    rejectedTaskHandler<spring:bean/>
    +

    Element: tempDestinationAuthorizationEntry

    + + + + + + + + + + +
    AttributeTypeDescription
    adminxs:string
    groupClassxs:string
    queuexs:stringA helper method to set the destination from a configuration file
    readxs:string
    tempQueuexs:boolean
    tempTopicxs:boolean
    topicxs:stringA helper method to set the destination from a configuration file
    writexs:string
    + + + + + + +
    ElementTypeDescription
    adminACLs(<spring:bean/>)*
    destinationqueue | tempQueue | tempTopic | topic
    readACLs(<spring:bean/>)*
    writeACLs(<spring:bean/>)*
    +

    Element: tempQueue

    + + + + + + +
    AttributeTypeDescription
    connectionIdxs:string
    namexs:string
    physicalNamexs:string
    sequenceIdxs:long
    + + + + + +
    ElementTypeDescription
    compositeDestinations(queue | tempQueue | tempTopic | topic)*
    connection<spring:bean/>
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    +

    Element: tempTopic

    + + + + + + +
    AttributeTypeDescription
    connectionIdxs:string
    namexs:string
    physicalNamexs:string
    sequenceIdxs:long
    + + + + + +
    ElementTypeDescription
    compositeDestinations(queue | tempQueue | tempTopic | topic)*
    connection<spring:bean/>
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    +

    Element: tempUsage

    + + + + + + + + +
    AttributeTypeDescription
    limitxs:stringSets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used
    namexs:string
    percentUsagexs:integer
    percentUsageMinDeltaxs:stringSets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.
    pollingTimexs:integer
    usagePortionxs:float
    + + + + + + +
    ElementTypeDescription
    executor<spring:bean/>
    limiterdefaultUsageCapacity | usageCapacity
    parent<spring:bean/>
    storepListStore
    +

    Element: timeStampingBrokerPlugin

    + + + + + +
    AttributeTypeDescription
    futureOnlyxs:boolean
    ttlCeilingxs:longsetter method for ttlCeiling
    zeroExpirationOverridexs:longsetter method for zeroExpirationOverride
    + + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    processNetworkMessages<spring:bean/>
    +

    Element: timedSubscriptionRecoveryPolicy

    + + + +
    AttributeTypeDescription
    recoverDurationxs:long
    + + + +
    ElementTypeDescription
    brokerdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: topic

    + + + + +
    AttributeTypeDescription
    namexs:string
    physicalNamexs:string
    + + + + +
    ElementTypeDescription
    compositeDestinations(queue | tempQueue | tempTopic | topic)*
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    +

    Element: traceBrokerPathPlugin

    + + + +
    AttributeTypeDescription
    stampPropertyxs:string
    + + + + +
    ElementTypeDescription
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    +

    Element: transact-database-locker

    + + + + + + +
    AttributeTypeDescription
    failIfLockedxs:boolean
    lockAcquireSleepIntervalxs:long
    namexs:string
    queryTimeoutxs:integer
    + + + +
    ElementTypeDescription
    exceptionHandler<spring:bean/>
    +

    Element: transact-jdbc-adapter

    + + + + + +
    AttributeTypeDescription
    batchStatmentsxs:boolean
    maxRowsxs:integer
    useExternalMessageReferencesxs:boolean
    + + + +
    ElementTypeDescription
    statementsstatements
    +

    Element: transportConnector

    + + + + + + + + + + + + + + +
    AttributeTypeDescription
    auditNetworkProducersxs:booleanEnable a producer audit on network connections, Traps the case of a missing send reply and resend. +Note: does not work with conduit=false, networked composite destinations or networked virtual topics
    disableAsyncDispatchxs:boolean
    discoveryUrixs:string
    enableStatusMonitorxs:boolean
    maximumConsumersAllowedPerConnectionxs:integer
    maximumProducersAllowedPerConnectionxs:integer
    namexs:string
    rebalanceClusterClientsxs:boolean
    updateClusterClientsxs:boolean
    updateClusterClientsOnRemovexs:boolean
    updateClusterFilterxs:string
    urixs:stringSets the server transport URI to use if there is not a +{@link TransportServer} configured via the +{@link #setServer(TransportServer)} method. This value is used to lazy +create a {@link TransportServer} instance
    + + + + + + + + +
    ElementTypeDescription
    brokerInfo<spring:bean/>
    brokerServicebroker | brokerServiceThis is called by the BrokerService right before it starts the transport.
    discoveryAgent<spring:bean/>
    messageAuthorizationPolicy<spring:bean/>Sets the policy used to decide if the current connection is authorized to +consume a given message
    server<spring:bean/>
    taskRunnerFactorytaskRunnerFactory
    +

    Element: udpTraceBrokerPlugin

    + + + + + +
    AttributeTypeDescription
    broadcastxs:boolean
    destinationxs:string
    maxTraceDatagramSizexs:integer
    + + + + + + + +
    ElementTypeDescription
    address<spring:bean/>
    adminConnectionContext<spring:bean/>
    nextdestinationPathSeparatorPlugin | forcePersistencyModeBroker | loggingBrokerPlugin | multicastTraceBrokerPlugin | redeliveryPlugin | timeStampingBrokerPlugin | traceBrokerPathPlugin | udpTraceBrokerPlugin
    wireFormat<spring:bean/>
    wireFormatFactory<spring:bean/>
    +

    Element: uniquePropertyMessageEvictionStrategy

    + + + + +
    AttributeTypeDescription
    evictExpiredMessagesHighWatermarkxs:integerSets the high water mark on which we will eagerly evict expired messages from RAM
    propertyNamexs:string
    +

    Element: usageCapacity

    + + + +
    AttributeTypeDescription
    limitxs:long
    +

    Element: virtualDestinationInterceptor

    + + + +
    ElementTypeDescription
    virtualDestinations(compositeQueue | compositeTopic | virtualTopic)*
    +

    Element: virtualSelectorCacheBrokerPlugin

    + + + +
    AttributeTypeDescription
    persistFilexs:stringSets the location of the persistent cache
    +

    Element: virtualTopic

    + + + + + + + +
    AttributeTypeDescription
    localxs:boolean
    namexs:string
    postfixxs:stringSets any postix used to identify the queue consumers
    prefixxs:stringSets the prefix wildcard used to identify the queue consumers for a given +topic
    selectorAwarexs:booleanIndicates whether the selectors of consumers are used to determine dispatch +to a virtual destination, when true only messages matching an existing +consumer will be dispatched.
    +

    Element: vmCursor

    +

    Element: vmDurableCursor

    +

    Element: vmQueueCursor

    +

    Element: xaConnectionFactory

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    AttributeTypeDescription
    alwaysSessionAsyncxs:booleanIf this flag is not set then a separate thread is not used for dispatching messages for each Session in +the Connection. However, a separate thread is always used if there is more than one session, or the session +isn't in auto acknowledge or duplicates ok mode. By default this value is set to true and session dispatch +happens asynchronously.
    alwaysSyncSendxs:booleanSet true if always require messages to be sync sent
    auditDepthxs:integer
    auditMaximumProducerNumberxs:integer
    beanNamexs:string
    brokerURLxs:stringSets the connection +URL used to connect to the ActiveMQ broker.
    checkForDuplicatesxs:boolean
    clientIDxs:stringSets the JMS clientID to use for the created connection. Note that this +can only be used by one connection at once so generally its a better idea +to set the clientID on a Connection
    clientIDPrefixxs:stringSets the prefix used by autogenerated JMS Client ID values which are used +if the JMS client does not explicitly specify on.
    closeTimeoutxs:integerSets the timeout before a close is considered complete. Normally a +close() on a connection waits for confirmation from the broker; this +allows that operation to timeout to save the client hanging if there is +no broker
    connectionIDPrefixxs:stringSets the prefix used by connection id generator
    consumerFailoverRedeliveryWaitPeriodxs:long
    copyMessageOnSendxs:booleanShould a JMS message be copied to a new JMS Message object as part of the +send() method in JMS. This is enabled by default to be compliant with the +JMS specification. You can disable it if you do not mutate JMS messages +after they are sent for a performance boost
    disableTimeStampsByDefaultxs:booleanSets whether or not timestamps on messages should be disabled or not. If +you disable them it adds a small performance boost.
    dispatchAsyncxs:booleanEnables or disables the default setting of whether or not consumers have +their messages dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers.
    exclusiveConsumerxs:booleanEnables or disables whether or not queue consumers should be exclusive or +not for example to preserve ordering when not using Message Groups
    maxThreadPoolSizexs:integer
    messagePrioritySupportedxs:boolean
    nestedMapAndListEnabledxs:booleanEnables/disables whether or not Message properties and MapMessage entries +support Nested +Structures of Map and List objects
    nonBlockingRedeliveryxs:booleanWhen true a MessageConsumer will not stop Message delivery before re-delivering Messages +from a rolled back transaction. This implies that message order will not be preserved and +also will result in the TransactedIndividualAck option to be enabled.
    objectMessageSerializationDeferedxs:booleanWhen an object is set on an ObjectMessage, the JMS spec requires the +object to be serialized by that set method. Enabling this flag causes the +object to not get serialized. The object may subsequently get serialized +if the message needs to be sent over a socket or stored to disk.
    optimizeAcknowledgexs:boolean
    optimizeAcknowledgeTimeOutxs:longThe max time in milliseconds between optimized ack batches
    optimizedAckScheduledAckIntervalxs:longGets the configured time interval that is used to force all MessageConsumers that have optimizedAcknowledge enabled +to send an ack for any outstanding Message Acks. By default this value is set to zero meaning that the consumers +will not do any background Message acknowledgment.
    optimizedMessageDispatchxs:booleanIf this flag is set then an larger prefetch limit is used - only +applicable for durable topic subscribers.
    passwordxs:stringSets the JMS password used for connections created from this factory
    producerWindowSizexs:integer
    sendAcksAsyncxs:boolean
    sendTimeoutxs:integer
    statsEnabledxs:boolean
    transactedIndividualAckxs:booleanwhen true, submit individual transacted acks immediately rather than with transaction completion. +This allows the acks to represent delivery status which can be persisted on rollback +Used in conjunction with org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter#setRewriteOnRedelivery(boolean) true
    useAsyncSendxs:booleanForces the use of Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss.
    useBeanNameAsClientIdPrefixxs:boolean
    useCompressionxs:booleanEnables the use of compression of the message bodies
    useDedicatedTaskRunnerxs:boolean
    useRetroactiveConsumerxs:booleanSets whether or not retroactive consumers are enabled. Retroactive +consumers allow non-durable topic subscribers to receive old messages +that were published before the non-durable subscriber started.
    userNamexs:stringSets the JMS userName used by connections created by this factory
    warnAboutUnstartedConnectionTimeoutxs:longEnables the timeout from a connection creation to when a warning is +generated if the connection is not properly started via +{@link Connection#start()} and a message is received by a consumer. It is +a very common gotcha to forget to start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1).
    watchTopicAdvisoriesxs:boolean
    + + + + + + + + + + + + + + + +
    ElementTypeDescription
    blobTransferPolicy<spring:bean/>Sets the policy used to describe how out-of-band BLOBs (Binary Large +OBjects) are transferred from producers to brokers to consumers
    clientIdGenerator<spring:bean/>
    clientInternalExceptionListener<spring:bean/>Allows an {@link ClientInternalExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory)

    connectionIdGenerator<spring:bean/>
    exceptionListenercommandAgentAllows an {@link ExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory)

    prefetchPolicyprefetchPolicySets the prefetch +policy for consumers created by this connection.
    properties<spring:bean/>Get the properties from this instance for storing in JNDI
    redeliveryPolicyredeliveryPolicySets the global default redelivery policy to be used when a message is delivered +but the session is rolled back
    redeliveryPolicyMapredeliveryPolicyMapSets the global redelivery policy mapping to be used when a message is delivered +but the session is rolled back
    rejectedTaskHandler<spring:bean/>
    sessionTaskRunnertaskRunnerFactory
    transformer<spring:bean/>Sets the transformer used to transform messages before they are sent on +to the JMS bus or when they are received from the bus but before they are +delivered to the JMS client
    transportListener<spring:bean/>Allows a listener to be configured on the ConnectionFactory so that when this factory is used +with frameworks which don't expose the Connection such as Spring JmsTemplate, you can still register +a transport listener.
    + + + diff --git a/activemq-core/src/main/resources/activemq.xsd.wiki b/activemq-core/src/main/resources/activemq.xsd.wiki new file mode 100644 index 0000000000..1bcf3b211e --- /dev/null +++ b/activemq-core/src/main/resources/activemq.xsd.wiki @@ -0,0 +1,3789 @@ +h3. Elements By Type +{anchor:org.apache.activemq.util.IOExceptionHandler-types} +h4. The _[org.apache.activemq.util.IOExceptionHandler|#org.apache.activemq.util.IOExceptionHandler-types]_ Type Implementations + | _[|#defaultIOExceptionHandler-element]_ | {html}{html} | + | _[|#jDBCIOExceptionHandler-element]_ | {html}{html} | + +{anchor:org.apache.activemq.security.AuthorizationMap-types} +h4. The _[org.apache.activemq.security.AuthorizationMap|#org.apache.activemq.security.AuthorizationMap-types]_ Type Implementations + | _[|#authorizationMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies. Each entry in the map represents the authorization ACLs +for each operation.{html} | + | _[|#cachedLDAPAuthorizationMap-element]_ | {html}A {@link DefaultAuthorizationMap} implementation which uses LDAP to initialize and update authorization +policy.{html} | + | _[|#lDAPAuthorizationMap-element]_ | {html}An {@link AuthorizationMap} which uses LDAP{html} | + | _[|#simpleAuthorizationMap-element]_ | {html}An AuthorizationMap which is configured with individual DestinationMaps for +each operation.{html} | + +{anchor:org.apache.activemq.usage.SystemUsage-types} +h4. The _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ Type Implementations + | _[|#systemUsage-element]_ | {html}Holder for Usage instances for memory, store and temp files Main use case is +manage memory usage.{html} | + +{anchor:org.apache.activemq.store.jdbc.JDBCAdapter-types} +h4. 
The _[org.apache.activemq.store.jdbc.JDBCAdapter|#org.apache.activemq.store.jdbc.JDBCAdapter-types]_ Type Implementations + | _[|#axionJDBCAdapter-element]_ | {html}Axion specific Adapter. + +Axion does not seem to support ALTER statements or sub-selects. This means: +- We cannot auto upgrade the schema was we roll out new versions of ActiveMQ +- We cannot delete durable sub messages that have be acknowledged by all consumers.{html} | + | _[|#blobJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the getBlob()/setBlob() +operations. This is a little more involved since to insert a blob you have +to: + +1: insert empty blob. 2: select the blob 3: finally update the blob with data +value. + +The databases/JDBC drivers that use this adapter are: +
      +
    • +
    {html} | + | _[|#bytesJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the +setBytes()/getBytes() operations. The databases/JDBC drivers that use this +adapter are:{html} | + | _[|#db2JDBCAdapter-element]_ | {html}{html} | + | _[|#defaultJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used by the JDBCPersistenceAdapter.

    sub-classing is +encouraged to override the default implementation of methods to account for differences in JDBC Driver +implementations.

    The JDBCAdapter inserts and extracts BLOB data using the getBytes()/setBytes() operations.

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#hsqldb-jdbc-adapter-element]_ | {html}{html} | + | _[|#imageBasedJDBCAdaptor-element]_ | {html}Provides JDBCAdapter since that uses +IMAGE datatype to hold binary data. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Sybase
    • +
    • MS SQL
    • +
    {html} | + | _[|#informixJDBCAdapter-element]_ | {html}JDBC Adapter for Informix database. +Because Informix database restricts length of composite primary keys, length of +container name field and subscription id field must be reduced to 150 characters. +Therefore be sure not to use longer names for container name and subscription id than 150 characters.{html} | + | _[|#maxdb-jdbc-adapter-element]_ | {html}JDBC Adapter for the MaxDB database.{html} | + | _[|#mysql-jdbc-adapter-element]_ | {html}{html} | + | _[|#oracleBlobJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#oracleJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    {html} | + | _[|#postgresql-jdbc-adapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#streamJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the +setBinaryStream()/getBinaryStream() operations. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Axion
    • +
    {html} | + | _[|#sybase-jdbc-adapter-element]_ | {html}A JDBC Adapter for Sybase databases{html} | + | _[|#transact-jdbc-adapter-element]_ | {html}A JDBC Adapter for Transact-SQL based databases such as SQL Server or Sybase{html} | + +{anchor:javax.jms.Queue-types} +h4. The _[javax.jms.Queue|#javax.jms.Queue-types]_ Type Implementations + | _[|#queue-element]_ | {html}An ActiveMQ Queue{html} | + +{anchor:org.apache.activemq.broker.region.policy.SlowConsumerStrategy-types} +h4. The _[org.apache.activemq.broker.region.policy.SlowConsumerStrategy|#org.apache.activemq.broker.region.policy.SlowConsumerStrategy-types]_ Type Implementations + | _[|#abortSlowConsumerStrategy-element]_ | {html}Abort slow consumers when they reach the configured threshold of slowness, default is slow for 30 seconds{html} | + +{anchor:org.apache.activemq.network.NetworkConnector-types} +h4. The _[org.apache.activemq.network.NetworkConnector|#org.apache.activemq.network.NetworkConnector-types]_ Type Implementations + | _[|#ldapNetworkConnector-element]_ | {html}class to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.{html} | + | _[|#multicastNetworkConnector-element]_ | {html}A network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. + +This implementation does not depend on multicast at all; any other group +based transport could be used.{html} | + | _[|#networkConnector-element]_ | {html}A network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote broker{html} | + +{anchor:org.apache.activemq.broker.region.virtual.VirtualDestination-types} +h4. 
The _[org.apache.activemq.broker.region.virtual.VirtualDestination|#org.apache.activemq.broker.region.virtual.VirtualDestination-types]_ Type Implementations + | _[|#compositeQueue-element]_ | {html}Represents a virtual queue which forwards to a number of other destinations.{html} | + | _[|#compositeTopic-element]_ | {html}Represents a virtual topic which forwards to a number of other destinations.{html} | + | _[|#virtualTopic-element]_ | {html}Creates Virtual +Topics using a prefix and postfix. The virtual destination creates a +wildcard that is then used to look up all active queue subscriptions which +match.{html} | + +{anchor:javax.jms.Destination-types} +h4. The _[javax.jms.Destination|#javax.jms.Destination-types]_ Type Implementations + | _[|#queue-element]_ | {html}An ActiveMQ Queue{html} | + | _[|#tempQueue-element]_ | {html}An ActiveMQ Temporary Queue Destination{html} | + | _[|#tempTopic-element]_ | {html}An ActiveMQ Temporary Topic Destination{html} | + | _[|#topic-element]_ | {html}An ActiveMQ Topic{html} | + +{anchor:org.apache.activemq.store.jdbc.JDBCPersistenceAdapter-types} +h4. The _[org.apache.activemq.store.jdbc.JDBCPersistenceAdapter|#org.apache.activemq.store.jdbc.JDBCPersistenceAdapter-types]_ Type Implementations + | _[|#jdbcPersistenceAdapter-element]_ | {html}A {@link PersistenceAdapter} implementation using JDBC for persistence +storage. + +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.{html} | + +{anchor:org.apache.activemq.store.PersistenceAdapterFactory-types} +h4. 
The _[org.apache.activemq.store.PersistenceAdapterFactory|#org.apache.activemq.store.PersistenceAdapterFactory-types]_ Type Implementations + | _[|#amqPersistenceAdapterFactory-element]_ | {html}An implementation of {@link PersistenceAdapterFactory}{html} | + | _[|#journalPersistenceAdapterFactory-element]_ | {html}Factory class that can create PersistenceAdapter objects.{html} | + | _[|#journaledJDBC-element]_ | {html}Creates a default persistence model using the Journal and JDBC{html} | + +{anchor:org.apache.activemq.usage.MemoryUsage-types} +h4. The _[org.apache.activemq.usage.MemoryUsage|#org.apache.activemq.usage.MemoryUsage-types]_ Type Implementations + | _[|#memoryUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + +{anchor:org.apache.activemq.broker.region.policy.DispatchPolicy-types} +h4. The _[org.apache.activemq.broker.region.policy.DispatchPolicy|#org.apache.activemq.broker.region.policy.DispatchPolicy-types]_ Type Implementations + | _[|#priorityNetworkDispatchPolicy-element]_ | {html}dispatch policy that ignores lower priority duplicate network consumers, +used in conjunction with network bridge suppresDuplicateTopicSubscriptions{html} | + | _[|#roundRobinDispatchPolicy-element]_ | {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} | + | _[|#simpleDispatchPolicy-element]_ | {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} | + | _[|#strictOrderDispatchPolicy-element]_ | {html}Dispatch policy that causes every subscription to see messages in the same +order.{html} | + +{anchor:org.apache.activemq.usage.TempUsage-types} +h4. 
The _[org.apache.activemq.usage.TempUsage|#org.apache.activemq.usage.TempUsage-types]_ Type Implementations + | _[|#tempUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + +{anchor:org.apache.activemq.usage.StoreUsage-types} +h4. The _[org.apache.activemq.usage.StoreUsage|#org.apache.activemq.usage.StoreUsage-types]_ Type Implementations + | _[|#storeUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + +{anchor:org.apache.activemq.network.jms.ReconnectionPolicy-types} +h4. The _[org.apache.activemq.network.jms.ReconnectionPolicy|#org.apache.activemq.network.jms.ReconnectionPolicy-types]_ Type Implementations + | _[|#reconnectionPolicy-element]_ | {html}A policy object that defines how a {@link JmsConnector} deals with +reconnection of the local and foreign connections.{html} | + +{anchor:org.apache.activemq.broker.region.policy.SubscriptionRecoveryPolicy-types} +h4. 
The _[org.apache.activemq.broker.region.policy.SubscriptionRecoveryPolicy|#org.apache.activemq.broker.region.policy.SubscriptionRecoveryPolicy-types]_ Type Implementations + | _[|#fixedCountSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +count of last messages.{html} | + | _[|#fixedSizedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +amount of memory available in RAM for message history which is evicted in +time order.{html} | + | _[|#lastImageSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will only keep the +last message.{html} | + | _[|#noSubscriptionRecoveryPolicy-element]_ | {html}This SubscriptionRecoveryPolicy disable recovery of messages.{html} | + | _[|#queryBasedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will perform a user +specific query mechanism to load any messages they may have missed.{html} | + | _[|#timedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a timed +buffer of messages around in memory and use that to recover new +subscriptions.{html} | + +{anchor:org.apache.activemq.thread.TaskRunnerFactory-types} +h4. The _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ Type Implementations + | _[|#taskRunnerFactory-element]_ | {html}Manages the thread pool for long running tasks. Long running tasks are not +always active but when they are active, they may need a few iterations of +processing for them to become idle. The manager ensures that each task is +processes but that no one task overtakes the system. This is kinda like +cooperative multitasking.{html} | + +{anchor:org.apache.activemq.broker.region.group.MessageGroupMapFactory-types} +h4. 
The _[org.apache.activemq.broker.region.group.MessageGroupMapFactory|#org.apache.activemq.broker.region.group.MessageGroupMapFactory-types]_ Type Implementations + | _[|#messageGroupHashBucketFactory-element]_ | {html}A factory to create instances of {@link SimpleMessageGroupMap} when +implementing the Message +Groups functionality.{html} | + | _[|#simpleMessageGroupMapFactory-element]_ | {html}A factory to create instances of {@link SimpleMessageGroupMap} when implementing the +Message Groups functionality.{html} | + +{anchor:org.apache.activemq.store.kahadb.plist.PListStore-types} +h4. The _[org.apache.activemq.store.kahadb.plist.PListStore|#org.apache.activemq.store.kahadb.plist.PListStore-types]_ Type Implementations + | _[|#pListStore-element]_ | {html}{html} | + +{anchor:org.apache.activemq.broker.SslContext-types} +h4. The _[org.apache.activemq.broker.SslContext|#org.apache.activemq.broker.SslContext-types]_ Type Implementations + | _[|#sslContext-element]_ | {html}Extends the SslContext so that it's easier to configure from spring.{html} | + +{anchor:javax.jms.ExceptionListener-types} +h4. The _[javax.jms.ExceptionListener|#javax.jms.ExceptionListener-types]_ Type Implementations + | _[|#commandAgent-element]_ | {html}An agent which listens to commands on a JMS destination{html} | + +{anchor:org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter-types} +h4. The _[org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter|#org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter-types]_ Type Implementations + | _[|#kahaDB-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Database{html} | + +{anchor:org.apache.activemq.broker.region.DestinationInterceptor-types} +h4. 
The _[org.apache.activemq.broker.region.DestinationInterceptor|#org.apache.activemq.broker.region.DestinationInterceptor-types]_ Type Implementations + | _[|#mirroredQueue-element]_ | {html}Creates Mirrored +Queue using a prefix and postfix to define the topic name on which to mirror the queue to.{html} | + | _[|#virtualDestinationInterceptor-element]_ | {html}Implements Virtual Topics.{html} | + +{anchor:org.apache.activemq.network.jms.InboundQueueBridge-types} +h4. The _[org.apache.activemq.network.jms.InboundQueueBridge|#org.apache.activemq.network.jms.InboundQueueBridge-types]_ Type Implementations + | _[|#inboundQueueBridge-element]_ | {html}Create an Inbound Queue Bridge. By default this class uses the same name for +both the inbound and outbound queue. This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound queue names +separately.{html} | + +{anchor:org.apache.activemq.broker.BrokerService-types} +h4. The _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ Type Implementations + | _[|#broker-element]_ | {html}An ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.{html} | + | _[|#brokerService-element]_ | {html}Manages the lifecycle of an ActiveMQ Broker. A BrokerService consists of a +number of transport connectors, network connectors and a bunch of properties +which can be used to configure the broker as its lazily created.{html} | + +{anchor:org.apache.activemq.network.DemandForwardingBridgeSupport-types} +h4. 
The _[org.apache.activemq.network.DemandForwardingBridgeSupport|#org.apache.activemq.network.DemandForwardingBridgeSupport-types]_ Type Implementations + | _[|#compositeDemandForwardingBridge-element]_ | {html}A demand forwarding bridge which works with multicast style transports where +a single Transport could be communicating with multiple remote brokers{html} | + | _[|#demandForwardingBridge-element]_ | {html}Forwards messages from the local broker to the remote broker based on demand.{html} | + +{anchor:javax.jms.QueueConnectionFactory-types} +h4. The _[javax.jms.QueueConnectionFactory|#javax.jms.QueueConnectionFactory-types]_ Type Implementations + | _[|#connectionFactory-element]_ | {html}A Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + | _[|#xaConnectionFactory-element]_ | {html}A Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + +{anchor:org.apache.activemq.ActiveMQPrefetchPolicy-types} +h4. The _[org.apache.activemq.ActiveMQPrefetchPolicy|#org.apache.activemq.ActiveMQPrefetchPolicy-types]_ Type Implementations + | _[|#prefetchPolicy-element]_ | {html}Defines the prefetch message policies for different types of consumers{html} | + +{anchor:org.apache.activemq.broker.region.policy.PolicyMap-types} +h4. 
The _[org.apache.activemq.broker.region.policy.PolicyMap|#org.apache.activemq.broker.region.policy.PolicyMap-types]_ Type Implementations + | _[|#policyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + +{anchor:org.apache.activemq.network.jms.OutboundTopicBridge-types} +h4. The _[org.apache.activemq.network.jms.OutboundTopicBridge|#org.apache.activemq.network.jms.OutboundTopicBridge-types]_ Type Implementations + | _[|#outboundTopicBridge-element]_ | {html}Create an Outbound Topic Bridge. By default the bridge uses the same +name for both the inbound and outbound topics, however this can be altered +by using the public setter methods to configure both inbound and outbound +topic names.{html} | + +{anchor:org.apache.activemq.RedeliveryPolicy-types} +h4. The _[org.apache.activemq.RedeliveryPolicy|#org.apache.activemq.RedeliveryPolicy-types]_ Type Implementations + | _[|#redeliveryPolicy-element]_ | {html}Configuration options for a messageConsumer used to control how messages are re-delivered when they +are rolled back. +May be used server side on a per destination basis via the Broker RedeliveryPlugin{html} | + +{anchor:org.apache.activemq.Service-types} +h4. The _[org.apache.activemq.Service|#org.apache.activemq.Service-types]_ Type Implementations + | _[|#broker-element]_ | {html}An ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.{html} | + | _[|#brokerService-element]_ | {html}Manages the lifecycle of an ActiveMQ Broker. 
A BrokerService consists of a +number of transport connectors, network connectors and a bunch of properties +which can be used to configure the broker as its lazily created.{html} | + | _[|#commandAgent-element]_ | {html}An agent which listens to commands on a JMS destination{html} | + | _[|#database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#forwardingBridge-element]_ | {html}Forwards all messages from the local broker to the remote broker.{html} | + | _[|#inboundQueueBridge-element]_ | {html}Create an Inbound Queue Bridge. By default this class uses the same name for +both the inbound and outbound queue. This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound queue names +separately.{html} | + | _[|#inboundTopicBridge-element]_ | {html}Create an Inbound Topic Bridge. By default this class uses the topic name for +both the inbound and outbound topic. This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound topic names +separately.{html} | + | _[|#jdbcPersistenceAdapter-element]_ | {html}A {@link PersistenceAdapter} implementation using JDBC for persistence +storage. 
+ +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.{html} | + | _[|#jmsQueueConnector-element]_ | {html}A Bridge to other JMS Queue providers{html} | + | _[|#jmsTopicConnector-element]_ | {html}A Bridge to other JMS Topic providers{html} | + | _[|#journalPersistenceAdapterFactory-element]_ | {html}Factory class that can create PersistenceAdapter objects.{html} | + | _[|#journaledJDBC-element]_ | {html}Creates a default persistence model using the Journal and JDBC{html} | + | _[|#kahaDB-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Database{html} | + | _[|#ldapNetworkConnector-element]_ | {html}class to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.{html} | + | _[|#lease-database-locker-element]_ | {html}Represents an exclusive lease on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#levelDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with +LevelDB - Embedded Lightweight Non-Relational Database{html} | + | _[|#managementContext-element]_ | {html}An abstraction over JMX mbean registration{html} | + | _[|#masterConnector-element]_ | {html}Connects a Slave Broker to a Master when using Master Slave for High +Availability of messages.{html} | + | _[|#memoryUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. 
Main use case is manage +memory usage.{html} | + | _[|#multicastNetworkConnector-element]_ | {html}A network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. + +This implementation does not depend on multicast at all; any other group +based transport could be used.{html} | + | _[|#networkConnector-element]_ | {html}A network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote broker{html} | + | _[|#outboundQueueBridge-element]_ | {html}Create an Outbound Queue Bridge. By default the bridge uses the same +name for both the inbound and outbound queues, however this can be altered +by using the public setter methods to configure both inbound and outbound +queue names.{html} | + | _[|#outboundTopicBridge-element]_ | {html}Create an Outbound Topic Bridge. By default the bridge uses the same +name for both the inbound and outbound topics, however this can be altered +by using the public setter methods to configure both inbound and outbound +topic names.{html} | + | _[|#pListStore-element]_ | {html}{html} | + | _[|#proxyConnector-element]_ | {html}{html} | + | _[|#shared-file-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#storeUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + | _[|#systemUsage-element]_ | {html}Holder for Usage instances for memory, store and temp files Main use case is +manage memory usage.{html} | + | _[|#tempUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. 
Main use case is manage +memory usage.{html} | + | _[|#transact-database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + +{anchor:org.apache.activemq.broker.region.policy.DeadLetterStrategy-types} +h4. The _[org.apache.activemq.broker.region.policy.DeadLetterStrategy|#org.apache.activemq.broker.region.policy.DeadLetterStrategy-types]_ Type Implementations + | _[|#individualDeadLetterStrategy-element]_ | {html}A {@link DeadLetterStrategy} where each destination has its own individual +DLQ using the subject naming hierarchy.{html} | + | _[|#sharedDeadLetterStrategy-element]_ | {html}A default implementation of {@link DeadLetterStrategy} which uses +a constant destination.{html} | + +{anchor:org.apache.activemq.command.ActiveMQDestination-types} +h4. The _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ Type Implementations + | _[|#queue-element]_ | {html}An ActiveMQ Queue{html} | + | _[|#tempQueue-element]_ | {html}An ActiveMQ Temporary Queue Destination{html} | + | _[|#tempTopic-element]_ | {html}An ActiveMQ Temporary Topic Destination{html} | + | _[|#topic-element]_ | {html}An ActiveMQ Topic{html} | + +{anchor:org.apache.activemq.security.TempDestinationAuthorizationEntry-types} +h4. The _[org.apache.activemq.security.TempDestinationAuthorizationEntry|#org.apache.activemq.security.TempDestinationAuthorizationEntry-types]_ Type Implementations + | _[|#tempDestinationAuthorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destination{html} | + +{anchor:javax.jms.Topic-types} +h4. 
The _[javax.jms.Topic|#javax.jms.Topic-types]_ Type Implementations + | _[|#topic-element]_ | {html}An ActiveMQ Topic{html} | + +{anchor:org.apache.activemq.broker.region.policy.PendingDurableSubscriberMessageStoragePolicy-types} +h4. The _[org.apache.activemq.broker.region.policy.PendingDurableSubscriberMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingDurableSubscriberMessageStoragePolicy-types]_ Type Implementations + | _[|#fileDurableSubscriberCursor-element]_ | {html}Pending messages for durable subscribers{html} | + | _[|#storeDurableSubscriberCursor-element]_ | {html}Pending messages for a durable{html} | + | _[|#vmDurableCursor-element]_ | {html}Pending{html} | + +{anchor:javax.jms.TopicConnectionFactory-types} +h4. The _[javax.jms.TopicConnectionFactory|#javax.jms.TopicConnectionFactory-types]_ Type Implementations + | _[|#connectionFactory-element]_ | {html}A Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + | _[|#xaConnectionFactory-element]_ | {html}A Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + +{anchor:org.apache.activemq.store.jdbc.Statements-types} +h4. The _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ Type Implementations + | _[|#statements-element]_ | {html}{html} | + +{anchor:org.apache.activemq.security.AuthorizationEntry-types} +h4. 
The _[org.apache.activemq.security.AuthorizationEntry|#org.apache.activemq.security.AuthorizationEntry-types]_ Type Implementations + | _[|#authorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a specific +destination or a hierarchical wildcard area of destinations.{html} | + | _[|#tempDestinationAuthorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destination{html} | + +{anchor:org.apache.activemq.filter.DestinationMap-types} +h4. The _[org.apache.activemq.filter.DestinationMap|#org.apache.activemq.filter.DestinationMap-types]_ Type Implementations + | _[|#authorizationMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies. 
Each entry in the map represents the authorization ACLs +for each operation.{html} | + | _[|#cachedLDAPAuthorizationMap-element]_ | {html}A {@link DefaultAuthorizationMap} implementation which uses LDAP to initialize and update authorization +policy.{html} | + | _[|#mKahaDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports +distribution of destinations across multiple kahaDB persistence adapters{html} | + | _[|#policyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + | _[|#redeliveryPolicyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + +{anchor:org.apache.activemq.network.NetworkBridgeConfiguration-types} +h4. The _[org.apache.activemq.network.NetworkBridgeConfiguration|#org.apache.activemq.network.NetworkBridgeConfiguration-types]_ Type Implementations + | _[|#ldapNetworkConnector-element]_ | {html}class to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.{html} | + | _[|#multicastNetworkConnector-element]_ | {html}A network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. 
+ +This implementation does not depend on multicast at all; any other group +based transport could be used.{html} | + | _[|#networkConnector-element]_ | {html}A network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote broker{html} | + +{anchor:org.apache.activemq.broker.region.policy.PendingQueueMessageStoragePolicy-types} +h4. The _[org.apache.activemq.broker.region.policy.PendingQueueMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingQueueMessageStoragePolicy-types]_ Type Implementations + | _[|#fileQueueCursor-element]_ | {html}Pending{html} | + | _[|#storeCursor-element]_ | {html}Pending messages{html} | + | _[|#vmQueueCursor-element]_ | {html}Pending messages{html} | + +{anchor:org.apache.activemq.broker.region.policy.RedeliveryPolicyMap-types} +h4. The _[org.apache.activemq.broker.region.policy.RedeliveryPolicyMap|#org.apache.activemq.broker.region.policy.RedeliveryPolicyMap-types]_ Type Implementations + | _[|#redeliveryPolicyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + +{anchor:org.apache.activemq.broker.region.policy.PendingSubscriberMessageStoragePolicy-types} +h4. The _[org.apache.activemq.broker.region.policy.PendingSubscriberMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingSubscriberMessageStoragePolicy-types]_ Type Implementations + | _[|#fileCursor-element]_ | {html}Pending messages{html} | + | _[|#vmCursor-element]_ | {html}Pending messages held{html} | + +{anchor:org.apache.activemq.network.NetworkBridgeFilterFactory-types} +h4. 
The _[org.apache.activemq.network.NetworkBridgeFilterFactory|#org.apache.activemq.network.NetworkBridgeFilterFactory-types]_ Type Implementations + | _[|#conditionalNetworkBridgeFilterFactory-element]_ | {html}implement conditional behaviour for queue consumers, +allows replaying back to origin if no consumers are present on the local broker +after a configurable delay, irrespective of the networkTTL +Also allows rate limiting of messages through the network, useful for static includes{html} | + | _[|#defaultNetworkBridgeFilterFactory-element]_ | {html}implement default behaviour, filter that will not allow resend to origin +based on brokerPath and which respects networkTTL{html} | + +{anchor:javax.jms.ConnectionFactory-types} +h4. The _[javax.jms.ConnectionFactory|#javax.jms.ConnectionFactory-types]_ Type Implementations + | _[|#connectionFactory-element]_ | {html}A Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + | _[|#xaConnectionFactory-element]_ | {html}A Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + +{anchor:org.apache.activemq.broker.region.policy.PolicyEntry-types} +h4. The _[org.apache.activemq.broker.region.policy.PolicyEntry|#org.apache.activemq.broker.region.policy.PolicyEntry-types]_ Type Implementations + | _[|#policyEntry-element]_ | {html}Represents an entry in a {@link PolicyMap} for assigning policies to a +specific destination or a hierarchical wildcard area of destinations.{html} | + +{anchor:org.apache.activemq.broker.region.policy.PendingMessageLimitStrategy-types} +h4. 
The _[org.apache.activemq.broker.region.policy.PendingMessageLimitStrategy|#org.apache.activemq.broker.region.policy.PendingMessageLimitStrategy-types]_ Type Implementations + | _[|#constantPendingMessageLimitStrategy-element]_ | {html}This PendingMessageLimitStrategy is configured to a constant value for all subscriptions.{html} | + | _[|#prefetchRatePendingMessageLimitStrategy-element]_ | {html}This PendingMessageLimitStrategy sets the maximum pending message limit value to be +a multiplier of the prefetch limit of the subscription.{html} | + +{anchor:org.apache.activemq.usage.UsageCapacity-types} +h4. The _[org.apache.activemq.usage.UsageCapacity|#org.apache.activemq.usage.UsageCapacity-types]_ Type Implementations + | _[|#defaultUsageCapacity-element]_ | {html}Identify if a limit has been reached{html} | + | _[|#usageCapacity-element]_ | {html}Identify if a limit has been reached{html} | + +{anchor:org.apache.activemq.broker.BrokerPlugin-types} +h4. The _[org.apache.activemq.broker.BrokerPlugin|#org.apache.activemq.broker.BrokerPlugin-types]_ Type Implementations + | _[|#authorizationPlugin-element]_ | {html}An authorization plugin where each operation on a destination is checked +against an authorizationMap{html} | + | _[|#connectionDotFilePlugin-element]_ | {html}A DOT file creator plugin which +creates a DOT file showing the current connections{html} | + | _[|#destinationDotFilePlugin-element]_ | {html}A DOT +file creator plugin which creates a DOT file showing the current topic & queue hierarchies.{html} | + | _[|#destinationPathSeparatorPlugin-element]_ | {html}{html} | + | _[|#discardingDLQBrokerPlugin-element]_ | {html}{html} | + | _[|#forcePersistencyModeBrokerPlugin-element]_ | {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. 
+ +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} | + | _[|#jaasAuthenticationPlugin-element]_ | {html}Provides a JAAS based authentication plugin{html} | + | _[|#jaasCertificateAuthenticationPlugin-element]_ | {html}Provides a JAAS based SSL certificate authentication plugin{html} | + | _[|#jaasDualAuthenticationPlugin-element]_ | {html}Provides a JAAS based authentication plugin{html} | + | _[|#loggingBrokerPlugin-element]_ | {html}A simple Broker intercepter which allows you to enable/disable logging.{html} | + | _[|#multicastTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a Multicast +socket.{html} | + | _[|#redeliveryPlugin-element]_ | {html}Replace regular DLQ handling with redelivery via a resend to the original destination +after a delay +A destination matching RedeliveryPolicy controls the quantity and delay for re-sends +If there is no matching policy or an existing policy limit is exceeded by default +regular DLQ processing resumes. This is controlled via sendToDlqIfMaxRetriesExceeded +and fallbackToDeadLetter{html} | + | _[|#simpleAuthenticationPlugin-element]_ | {html}Provides a simple authentication plugin{html} | + | _[|#statisticsBrokerPlugin-element]_ | {html}A StatisticsBrokerPlugin +You can retrieve a Map Message for a Destination - or +Broker containing statistics as key-value pairs The message must contain a +replyTo Destination - else its ignored +To retrieve stats on the broker send a empty message to ActiveMQ.Statistics.Broker (Queue or Topic) +With a replyTo set to the destination you want the stats returned to. +To retrieve stats for a destination - e.g. foo - send an empty message to ActiveMQ.Statistics.Destination.foo +- this works with wildcards to - you get a message for each wildcard match on the replyTo destination. 
+The stats message is a MapMessage populated with statistics for the target{html} | + | _[|#timeStampingBrokerPlugin-element]_ | {html}A Broker interceptor which updates a JMS Client's timestamp on the message +with a broker timestamp. Useful when the clocks on client machines are known +to not be correct and you can only trust the time set on the broker machines. + +Enabling this plugin will break JMS compliance since the timestamp that the +producer sees on the messages after as send() will be different from the +timestamp the consumer will observe when he receives the message. This plugin +is not enabled in the default ActiveMQ configuration. + +2 new attributes have been added which will allow the administrator some override control +over the expiration time for incoming messages: + +Attribute 'zeroExpirationOverride' can be used to apply an expiration +time to incoming messages with no expiration defined (messages that would never expire) + +Attribute 'ttlCeiling' can be used to apply a limit to the expiration time{html} | + | _[|#traceBrokerPathPlugin-element]_ | {html}The TraceBrokerPathPlugin can be used in a network of Brokers. Each Broker +that has the plugin configured, will add it's brokerName to the content +of a JMS Property. If all Brokers have this property enabled, the path the +message actually took through the network can be seen in the defined property.{html} | + | _[|#udpTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a UDP +socket.{html} | + | _[|#virtualSelectorCacheBrokerPlugin-element]_ | {html}A plugin which allows the caching of the selector from a subscription queue. +

    +This stops the build-up of unwanted messages, especially when consumers may +disconnect from time to time when using virtual destinations. +

    +This is influenced by code snippets developed by Maciej Rakowicz{html} | + +{anchor:org.apache.activemq.network.jms.JmsMesageConvertor-types} +h4. The _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ Type Implementations + | _[|#simpleJmsMessageConvertor-element]_ | {html}Converts Message from one JMS to another{html} | + +{anchor:org.apache.activemq.broker.Broker-types} +h4. The _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ Type Implementations + | _[|#destinationPathSeparatorPlugin-element]_ | {html}{html} | + | _[|#forcePersistencyModeBroker-element]_ | {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} | + | _[|#loggingBrokerPlugin-element]_ | {html}A simple Broker intercepter which allows you to enable/disable logging.{html} | + | _[|#multicastTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a Multicast +socket.{html} | + | _[|#redeliveryPlugin-element]_ | {html}Replace regular DLQ handling with redelivery via a resend to the original destination +after a delay +A destination matching RedeliveryPolicy controls the quantity and delay for re-sends +If there is no matching policy or an existing policy limit is exceeded by default +regular DLQ processing resumes. This is controlled via sendToDlqIfMaxRetriesExceeded +and fallbackToDeadLetter{html} | + | _[|#timeStampingBrokerPlugin-element]_ | {html}A Broker interceptor which updates a JMS Client's timestamp on the message +with a broker timestamp. Useful when the clocks on client machines are known +to not be correct and you can only trust the time set on the broker machines. 
+ +Enabling this plugin will break JMS compliance since the timestamp that the +producer sees on the messages after a send() will be different from the +timestamp the consumer will observe when he receives the message. This plugin +is not enabled in the default ActiveMQ configuration. + +2 new attributes have been added which will allow the administrator some override control +over the expiration time for incoming messages: + +Attribute 'zeroExpirationOverride' can be used to apply an expiration +time to incoming messages with no expiration defined (messages that would never expire) + +Attribute 'ttlCeiling' can be used to apply a limit to the expiration time{html} | + | _[|#traceBrokerPathPlugin-element]_ | {html}The TraceBrokerPathPlugin can be used in a network of Brokers. Each Broker +that has the plugin configured, will add its brokerName to the content +of a JMS Property. If all Brokers have this property enabled, the path the +message actually took through the network can be seen in the defined property.{html} | + | _[|#udpTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a UDP +socket.{html} | + +{anchor:org.apache.activemq.broker.TransportConnector-types} +h4. The _[org.apache.activemq.broker.TransportConnector|#org.apache.activemq.broker.TransportConnector-types]_ Type Implementations + | _[|#transportConnector-element]_ | {html}{html} | + +{anchor:org.apache.activemq.network.jms.InboundTopicBridge-types} +h4. The _[org.apache.activemq.network.jms.InboundTopicBridge|#org.apache.activemq.network.jms.InboundTopicBridge-types]_ Type Implementations + | _[|#inboundTopicBridge-element]_ | {html}Create an Inbound Topic Bridge. By default this class uses the topic name for +both the inbound and outbound topic. 
This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound topic names +separately.{html} | + +{anchor:org.apache.activemq.network.jms.JmsConnector-types} +h4. The _[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_ Type Implementations + | _[|#jmsQueueConnector-element]_ | {html}A Bridge to other JMS Queue providers{html} | + | _[|#jmsTopicConnector-element]_ | {html}A Bridge to other JMS Topic providers{html} | + +{anchor:org.apache.activemq.store.PersistenceAdapter-types} +h4. The _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ Type Implementations + | _[|#amqPersistenceAdapter-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} | + | _[|#jdbcPersistenceAdapter-element]_ | {html}A {@link PersistenceAdapter} implementation using JDBC for persistence +storage. 
+ +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.{html} | + | _[|#journalPersistenceAdapter-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} | + | _[|#kahaDB-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Database{html} | + | _[|#kahaPersistenceAdapter-element]_ | {html}{html} | + | _[|#levelDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with +LevelDB - Embedded Lightweight Non-Relational Database{html} | + | _[|#mKahaDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports +distribution of destinations across multiple kahaDB persistence adapters{html} | + | _[|#memoryPersistenceAdapter-element]_ | {html}{html} | + +{anchor:org.apache.activemq.network.jms.OutboundQueueBridge-types} +h4. The _[org.apache.activemq.network.jms.OutboundQueueBridge|#org.apache.activemq.network.jms.OutboundQueueBridge-types]_ Type Implementations + | _[|#outboundQueueBridge-element]_ | {html}Create an Outbound Queue Bridge. By default the bridge uses the same +name for both the inbound and outbound queues, however this can be altered +by using the public setter methods to configure both inbound and outbound +queue names.{html} | + +{anchor:org.apache.activemq.broker.Locker-types} +h4. 
The _[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ Type Implementations + | _[|#database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#lease-database-locker-element]_ | {html}Represents an exclusive lease on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#shared-file-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#transact-database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + +{anchor:org.apache.activemq.broker.region.policy.MessageEvictionStrategy-types} +h4. The _[org.apache.activemq.broker.region.policy.MessageEvictionStrategy|#org.apache.activemq.broker.region.policy.MessageEvictionStrategy-types]_ Type Implementations + | _[|#oldestMessageEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message first (which is the +default).{html} | + | _[|#oldestMessageWithLowestPriorityEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message with the lowest priority first.{html} | + | _[|#uniquePropertyMessageEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message within messages with the same property value{html} | + +{anchor:org.apache.activemq.broker.jmx.ManagementContext-types} +h4. The _[org.apache.activemq.broker.jmx.ManagementContext|#org.apache.activemq.broker.jmx.ManagementContext-types]_ Type Implementations + | _[|#managementContext-element]_ | {html}An abstraction over JMX mbean registration{html} | + +{anchor:org.apache.activemq.filter.DestinationMapEntry-types} +h4. 
The _[org.apache.activemq.filter.DestinationMapEntry|#org.apache.activemq.filter.DestinationMapEntry-types]_ Type Implementations + | _[|#authorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a specific +destination or a hierarchical wildcard area of destinations.{html} | + | _[|#destinationEntry-element]_ | {html}A default entry in a DestinationMap which holds a single value.{html} | + | _[|#filteredKahaDB-element]_ | {html}{html} | + | _[|#policyEntry-element]_ | {html}Represents an entry in a {@link PolicyMap} for assigning policies to a +specific destination or a hierarchical wildcard area of destinations.{html} | + | _[|#redeliveryPolicy-element]_ | {html}Configuration options for a messageConsumer used to control how messages are re-delivered when they +are rolled back. +May be used server side on a per destination basis via the Broker RedeliveryPlugin{html} | + | _[|#tempDestinationAuthorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destination{html} | + + +{anchor:abortSlowConsumerStrategy-element} +h3. The _[|#abortSlowConsumerStrategy-element]_ Element + {html}Abort slow consumers when they reach the configured threshold of slowness, default is slow for 30 seconds{html} +h4. 
Properties + || Property Name || Type || Description || + | abortConnection | _boolean_ | {html}abort the consumers connection rather than sending a stop command to the remote consumer{html} | + | brokerService | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | checkPeriod | _long_ | {html}time in milliseconds between checks for slow subscriptions{html} | + | maxSlowCount | _long_ | {html}number of times a subscription can be deemed slow before triggering abort +effect depends on dispatch rate as slow determination is done on dispatch{html} | + | maxSlowDuration | _long_ | {html}time in milliseconds that a sub can remain slow before triggering +an abort.{html} | + | name | _java.lang.String_ | {html}{html} | + +{anchor:amqPersistenceAdapter-element} +h3. The _[|#amqPersistenceAdapter-element]_ Element + {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} +h4. 
Properties + || Property Name || Type || Description || + | archiveDataLogs | _boolean_ | {html}{html} | + | asyncDataManager | _org.apache.activemq.kaha.impl.async.AsyncDataManager_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | checkpointInterval | _long_ | {html}{html} | + | cleanupInterval | _long_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | directoryArchive | _java.io.File_ | {html}{html} | + | disableLocking | _boolean_ | {html}{html} | + | forceRecoverReferenceStore | _boolean_ | {html}{html} | + | indexBinSize | _int_ | {html}{html} | + | indexKeySize | _int_ | {html}{html} | + | indexLoadFactor | _int_ | {html}{html} | + | indexMaxBinSize | _int_ | {html}{html} | + | indexPageSize | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | journalThreadPriority | _int_ | {html}{html} | + | maxCheckpointMessageAddSize | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | maxFileLength | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | maxReferenceFileLength | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | persistentIndex | _boolean_ | {html}{html} | + | recoverReferenceStore | _boolean_ | {html}{html} | + | referenceStoreAdapter | _org.apache.activemq.store.ReferenceStoreAdapter_ | {html}{html} | + | syncOnWrite | _boolean_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | 
useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useNio | _boolean_ | {html}{html} | + | wireFormat | _org.apache.activemq.wireformat.WireFormat_ | {html}{html} | + +{anchor:amqPersistenceAdapterFactory-element} +h3. The _[|#amqPersistenceAdapterFactory-element]_ Element + {html}An implementation of {@link PersistenceAdapterFactory}{html} +h4. Properties + || Property Name || Type || Description || + | brokerName | _java.lang.String_ | {html}{html} | + | checkpointInterval | _long_ | {html}{html} | + | cleanupInterval | _long_ | {html}{html} | + | dataDirectory | _java.io.File_ | {html}{html} | + | forceRecoverReferenceStore | _boolean_ | {html}{html} | + | indexBinSize | _int_ | {html}{html} | + | indexKeySize | _int_ | {html}{html} | + | indexLoadFactor | _int_ | {html}{html} | + | indexMaxBinSize | _int_ | {html}{html} | + | indexPageSize | _int_ | {html}{html} | + | journalThreadPriority | _int_ | {html}{html} | + | maxFileLength | _int_ | {html}{html} | + | maxReferenceFileLength | _int_ | {html}{html} | + | persistentIndex | _boolean_ | {html}{html} | + | recoverReferenceStore | _boolean_ | {html}{html} | + | referenceStoreAdapter | _org.apache.activemq.store.ReferenceStoreAdapter_ | {html}{html} | + | syncOnTransaction | _boolean_ | {html}{html} | + | syncOnWrite | _boolean_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useNio | _boolean_ | {html}{html} | + +{anchor:authenticationUser-element} +h3. The _[|#authenticationUser-element]_ Element + {html}A helper object used to configure simple authentication plugin{html} +h4. Properties + || Property Name || Type || Description || + | groups | _java.lang.String_ | {html}{html} | + | password | _java.lang.String_ | {html}{html} | + | username | _java.lang.String_ | {html}{html} | + +{anchor:authorizationEntry-element} +h3. 
The _[|#authorizationEntry-element]_ Element + {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a specific +destination or a hierarchical wildcard area of destinations.{html} +h4. Properties + || Property Name || Type || Description || + | admin | _java.lang.String_ | {html}{html} | + | adminACLs | (_java.lang.Object_)\* | {html}{html} | + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | groupClass | _java.lang.String_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | read | _java.lang.String_ | {html}{html} | + | readACLs | (_java.lang.Object_)\* | {html}{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | write | _java.lang.String_ | {html}{html} | + | writeACLs | (_java.lang.Object_)\* | {html}{html} | + +{anchor:authorizationMap-element} +h3. The _[|#authorizationMap-element]_ Element + {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies. Each entry in the map represents the authorization ACLs +for each operation.{html} +h4. 
Properties + || Property Name || Type || Description || + | authorizationEntries | (_java.lang.Object_)\* | {html}Sets the individual entries on the authorization map{html} | + | defaultEntry | _[org.apache.activemq.security.AuthorizationEntry|#org.apache.activemq.security.AuthorizationEntry-types]_ | {html}{html} | + | entries | (_java.lang.Object_)\* | {html}A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring{html} | + | tempDestinationAuthorizationEntry | _[org.apache.activemq.security.TempDestinationAuthorizationEntry|#org.apache.activemq.security.TempDestinationAuthorizationEntry-types]_ | {html}{html} | + +{anchor:authorizationPlugin-element} +h3. The _[|#authorizationPlugin-element]_ Element + {html}An authorization plugin where each operation on a destination is checked +against an authorizationMap{html} +h4. Properties + || Property Name || Type || Description || + | map | _[org.apache.activemq.security.AuthorizationMap|#org.apache.activemq.security.AuthorizationMap-types]_ | {html}{html} | + +{anchor:axionJDBCAdapter-element} +h3. The _[|#axionJDBCAdapter-element]_ Element + {html}Axion specific Adapter. + +Axion does not seem to support ALTER statements or sub-selects. This means: +- We cannot auto upgrade the schema as we roll out new versions of ActiveMQ +- We cannot delete durable sub messages that have been acknowledged by all consumers.{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:blobJDBCAdapter-element} +h3. The _[|#blobJDBCAdapter-element]_ Element + {html}This JDBCAdapter inserts and extracts BLOB data using the getBlob()/setBlob() +operations. 
This is a little more involved since to insert a blob you have +to: + +1: insert empty blob. 2: select the blob 3: finally update the blob with data +value. + +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:broker-element} +h3. The _[|#broker-element]_ Element + {html}An ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.{html} +h4. Properties + || Property Name || Type || Description || + | adminView | _org.apache.activemq.broker.jmx.BrokerView_ | {html}Returns the administration view of the broker; used to create and destroy +resources such as queues and topics. Note this method returns null if JMX +is disabled.{html} | + | advisorySupport | _java.lang.String_ | {html}Allows the support of advisory messages to be disabled for performance +reasons.{html} | + | allowTempAutoCreationOnSend | _boolean_ | {html}enable if temp destinations need to be propagated through a network when +advisorySupport==false. 
This is used in conjunction with the policy +gcInactiveDestinations for matching temps so they can get removed +when inactive{html} | + | brokerContext | _org.apache.activemq.broker.BrokerContext_ | {html}{html} | + | brokerId | _java.lang.String_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}Sets the name of this broker; which must be unique in the network{html} | + | brokerObjectName | _javax.management.ObjectName_ | {html}Sets the JMX ObjectName for this broker{html} | + | cacheTempDestinations | _boolean_ | {html}{html} | + | consumerSystemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | consumerSystemUsagePortion | _int_ | {html}{html} | + | dataDirectory | _java.lang.String_ | {html}Sets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.{html} | + | dataDirectoryFile | _java.io.File_ | {html}Sets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.{html} | + | dedicatedTaskRunner | _boolean_ | {html}{html} | + | deleteAllMessagesOnStartup | _java.lang.String_ | {html}Sets whether or not all messages are deleted on startup - mostly only +useful for testing.{html} | + | destinationFactory | _org.apache.activemq.broker.region.DestinationFactory_ | {html}{html} | + | destinationInterceptors | (_[org.apache.activemq.broker.region.DestinationInterceptor|#org.apache.activemq.broker.region.DestinationInterceptor-types]_)\* | {html}Sets the destination interceptors to use{html} | + | destinationPolicy | _[org.apache.activemq.broker.region.policy.PolicyMap|#org.apache.activemq.broker.region.policy.PolicyMap-types]_ | {html}Sets the destination specific policies available either for exact +destinations or for wildcard areas of destinations.{html} | + | destinations | 
(_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}Sets the destinations which should be loaded/created on startup{html} | + | enableStatistics | _boolean_ | {html}Sets whether or not the Broker's services enable statistics or not.{html} | + | ioExceptionHandler | _[org.apache.activemq.util.IOExceptionHandler|#org.apache.activemq.util.IOExceptionHandler-types]_ | {html}override the Default IOException handler, called when persistence adapter +has experiences File or JDBC I/O Exceptions{html} | + | jmsBridgeConnectors | (_[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_)\* | {html}{html} | + | keepDurableSubsActive | _boolean_ | {html}{html} | + | managementContext | _[org.apache.activemq.broker.jmx.ManagementContext|#org.apache.activemq.broker.jmx.ManagementContext-types]_ | {html}{html} | + | masterConnectorURI | _java.lang.String_ | {html}{html} | + | maxPurgedDestinationsPerSweep | _int_ | {html}{html} | + | mbeanInvocationTimeout | _long_ | {html}Gets the time in Milliseconds that an invocation of an MBean method will wait before +failing. 
The default value is to wait forever (zero).{html} | + | messageAuthorizationPolicy | _org.apache.activemq.security.MessageAuthorizationPolicy_ | {html}Sets the policy used to decide if the current connection is authorized to +consume a given message{html} | + | monitorConnectionSplits | _boolean_ | {html}{html} | + | networkConnectorStartAsync | _boolean_ | {html}{html} | + | networkConnectorURIs | (_java.lang.String_)\* | {html}{html} | + | networkConnectors | (_[org.apache.activemq.network.NetworkConnector|#org.apache.activemq.network.NetworkConnector-types]_)\* | {html}Sets the network connectors which this broker will use to connect to +other brokers in a federated network{html} | + | offlineDurableSubscriberTaskSchedule | _int_ | {html}{html} | + | offlineDurableSubscriberTimeout | _int_ | {html}{html} | + | passiveSlave | _java.lang.String_ | {html}Get the passiveSlave{html} | + | persistenceAdapter | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}Sets the persistence adaptor implementation to use for this broker{html} | + | persistenceFactory | _[org.apache.activemq.store.PersistenceAdapterFactory|#org.apache.activemq.store.PersistenceAdapterFactory-types]_ | {html}{html} | + | persistenceTaskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | persistenceThreadPriority | _int_ | {html}{html} | + | persistent | _java.lang.String_ | {html}Sets whether or not persistence is enabled or disabled.{html} | + | plugins | (_[org.apache.activemq.broker.BrokerPlugin|#org.apache.activemq.broker.BrokerPlugin-types]_)\* | {html}Sets a number of broker plugins to install such as for security +authentication or authorization{html} | + | populateJMSXUserID | _boolean_ | {html}Sets whether or not the broker should populate the JMSXUserID header.{html} | + | populateUserNameInMBeans | _boolean_ | {html}Should MBeans that support 
showing the Authenticated User Name information have this +value filled in or not.{html} | + | producerSystemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | producerSystemUsagePortion | _int_ | {html}{html} | + | proxyConnectors | (_java.lang.Object_)\* | {html}Sets the network connectors which this broker will use to connect to +other brokers in a federated network{html} | + | regionBroker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | schedulePeriodForDestinationPurge | _int_ | {html}{html} | + | schedulerDirectory | _java.lang.String_ | {html}{html} | + | schedulerDirectoryFile | _java.io.File_ | {html}{html} | + | schedulerSupport | _java.lang.String_ | {html}{html} | + | services | (_[org.apache.activemq.Service|#org.apache.activemq.Service-types]_)\* | {html}Sets the services associated with this broker such as a +{@link MasterConnector}{html} | + | shutdownHooks | (_java.lang.Object_)\* | {html}Sets hooks to be executed when broker shut down{html} | + | shutdownOnMasterFailure | _boolean_ | {html}{html} | + | shutdownOnSlaveFailure | _java.lang.String_ | {html}{html} | + | splitSystemUsageForProducersConsumers | _boolean_ | {html}{html} | + | sslContext | _[org.apache.activemq.broker.SslContext|#org.apache.activemq.broker.SslContext-types]_ | {html}{html} | + | start | _boolean_ | {html}Sets whether or not the broker is started along with the ApplicationContext it is defined within. 
+Normally you would want the broker to start up along with the ApplicationContext but sometimes when working +with JUnit tests you may wish to start and stop the broker explicitly yourself.{html} | + | startAsync | _boolean_ | {html}{html} | + | supportFailOver | _boolean_ | {html}{html} | + | systemExitOnShutdown | _java.lang.String_ | {html}{html} | + | systemExitOnShutdownExitCode | _int_ | {html}{html} | + | systemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | taskRunnerPriority | _int_ | {html}{html} | + | tempDataStore | _[org.apache.activemq.store.kahadb.plist.PListStore|#org.apache.activemq.store.kahadb.plist.PListStore-types]_ | {html}{html} | + | timeBeforePurgeTempDestinations | _int_ | {html}{html} | + | tmpDataDirectory | _java.io.File_ | {html}{html} | + | transportConnectorURIs | (_java.lang.String_)\* | {html}{html} | + | transportConnectors | (_[org.apache.activemq.broker.TransportConnector|#org.apache.activemq.broker.TransportConnector-types]_)\* | {html}Sets the transport connectors which this broker will listen on for new +clients{html} | + | useAuthenticatedPrincipalForJMSXUserID | _boolean_ | {html}{html} | + | useJmx | _java.lang.String_ | {html}Sets whether or not the Broker's services should be exposed into JMX or +not.{html} | + | useLocalHostBrokerName | _boolean_ | {html}{html} | + | useLoggingForShutdownErrors | _boolean_ | {html}Sets whether or not we should use commons-logging when reporting errors +when shutting down the broker{html} | + | useMirroredQueues | _boolean_ | {html}Sets whether or not Mirrored +Queues should be supported by default if they have not been +explicitly configured.{html} | + | useShutdownHook | _boolean_ | {html}Sets whether or not we should use a shutdown handler to close down the +broker cleanly if 
the JVM is terminated. It is recommended you leave this +enabled.{html} | + | useTempMirroredQueues | _boolean_ | {html}{html} | + | useVirtualTopics | _boolean_ | {html}Sets whether or not Virtual +Topics should be supported by default if they have not been +explicitly configured.{html} | + | vmConnectorURI | _java.net.URI_ | {html}{html} | + | waitForSlave | _java.lang.String_ | {html}{html} | + | waitForSlaveTimeout | _long_ | {html}{html} | + +{anchor:brokerService-element} +h3. The _[|#brokerService-element]_ Element + {html}Manages the lifecycle of an ActiveMQ Broker. A BrokerService consists of a +number of transport connectors, network connectors and a bunch of properties +which can be used to configure the broker as its lazily created.{html} +h4. Properties + || Property Name || Type || Description || + | adminView | _org.apache.activemq.broker.jmx.BrokerView_ | {html}Returns the administration view of the broker; used to create and destroy +resources such as queues and topics. Note this method returns null if JMX +is disabled.{html} | + | advisorySupport | _java.lang.String_ | {html}Allows the support of advisory messages to be disabled for performance +reasons.{html} | + | allowTempAutoCreationOnSend | _boolean_ | {html}enable if temp destinations need to be propagated through a network when +advisorySupport==false. 
This is used in conjunction with the policy +gcInactiveDestinations for matching temps so they can get removed +when inactive{html} | + | brokerContext | _org.apache.activemq.broker.BrokerContext_ | {html}{html} | + | brokerId | _java.lang.String_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}Sets the name of this broker; which must be unique in the network{html} | + | brokerObjectName | _javax.management.ObjectName_ | {html}Sets the JMX ObjectName for this broker{html} | + | cacheTempDestinations | _boolean_ | {html}{html} | + | consumerSystemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | consumerSystemUsagePortion | _int_ | {html}{html} | + | dataDirectory | _java.lang.String_ | {html}Sets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.{html} | + | dataDirectoryFile | _java.io.File_ | {html}Sets the directory in which the data files will be stored by default for +the JDBC and Journal persistence adaptors.{html} | + | dedicatedTaskRunner | _boolean_ | {html}{html} | + | deleteAllMessagesOnStartup | _java.lang.String_ | {html}Sets whether or not all messages are deleted on startup - mostly only +useful for testing.{html} | + | destinationFactory | _org.apache.activemq.broker.region.DestinationFactory_ | {html}{html} | + | destinationInterceptors | (_[org.apache.activemq.broker.region.DestinationInterceptor|#org.apache.activemq.broker.region.DestinationInterceptor-types]_)\* | {html}Sets the destination interceptors to use{html} | + | destinationPolicy | _[org.apache.activemq.broker.region.policy.PolicyMap|#org.apache.activemq.broker.region.policy.PolicyMap-types]_ | {html}Sets the destination specific policies available either for exact +destinations or for wildcard areas of destinations.{html} | + | destinations | 
(_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}Sets the destinations which should be loaded/created on startup{html} | + | enableStatistics | _boolean_ | {html}Sets whether or not the Broker's services enable statistics or not.{html} | + | ioExceptionHandler | _[org.apache.activemq.util.IOExceptionHandler|#org.apache.activemq.util.IOExceptionHandler-types]_ | {html}override the Default IOException handler, called when persistence adapter +has experiences File or JDBC I/O Exceptions{html} | + | jmsBridgeConnectors | (_[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_)\* | {html}{html} | + | keepDurableSubsActive | _boolean_ | {html}{html} | + | managementContext | _[org.apache.activemq.broker.jmx.ManagementContext|#org.apache.activemq.broker.jmx.ManagementContext-types]_ | {html}{html} | + | masterConnectorURI | _java.lang.String_ | {html}{html} | + | maxPurgedDestinationsPerSweep | _int_ | {html}{html} | + | mbeanInvocationTimeout | _long_ | {html}Gets the time in Milliseconds that an invocation of an MBean method will wait before +failing. 
The default value is to wait forever (zero).{html} | + | messageAuthorizationPolicy | _org.apache.activemq.security.MessageAuthorizationPolicy_ | {html}Sets the policy used to decide if the current connection is authorized to +consume a given message{html} | + | monitorConnectionSplits | _boolean_ | {html}{html} | + | networkConnectorStartAsync | _boolean_ | {html}{html} | + | networkConnectorURIs | (_java.lang.String_)\* | {html}{html} | + | networkConnectors | (_[org.apache.activemq.network.NetworkConnector|#org.apache.activemq.network.NetworkConnector-types]_)\* | {html}Sets the network connectors which this broker will use to connect to +other brokers in a federated network{html} | + | offlineDurableSubscriberTaskSchedule | _int_ | {html}{html} | + | offlineDurableSubscriberTimeout | _int_ | {html}{html} | + | passiveSlave | _java.lang.String_ | {html}Get the passiveSlave{html} | + | persistenceAdapter | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}Sets the persistence adaptor implementation to use for this broker{html} | + | persistenceFactory | _[org.apache.activemq.store.PersistenceAdapterFactory|#org.apache.activemq.store.PersistenceAdapterFactory-types]_ | {html}{html} | + | persistenceTaskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | persistenceThreadPriority | _int_ | {html}{html} | + | persistent | _java.lang.String_ | {html}Sets whether or not persistence is enabled or disabled.{html} | + | plugins | (_[org.apache.activemq.broker.BrokerPlugin|#org.apache.activemq.broker.BrokerPlugin-types]_)\* | {html}Sets a number of broker plugins to install such as for security +authentication or authorization{html} | + | populateJMSXUserID | _boolean_ | {html}Sets whether or not the broker should populate the JMSXUserID header.{html} | + | populateUserNameInMBeans | _boolean_ | {html}Should MBeans that support 
showing the Authenticated User Name information have this +value filled in or not.{html} | + | producerSystemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | producerSystemUsagePortion | _int_ | {html}{html} | + | proxyConnectors | (_java.lang.Object_)\* | {html}Sets the network connectors which this broker will use to connect to +other brokers in a federated network{html} | + | regionBroker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | schedulePeriodForDestinationPurge | _int_ | {html}{html} | + | schedulerDirectory | _java.lang.String_ | {html}{html} | + | schedulerDirectoryFile | _java.io.File_ | {html}{html} | + | schedulerSupport | _java.lang.String_ | {html}{html} | + | services | (_[org.apache.activemq.Service|#org.apache.activemq.Service-types]_)\* | {html}Sets the services associated with this broker such as a +{@link MasterConnector}{html} | + | shutdownHooks | (_java.lang.Object_)\* | {html}Sets hooks to be executed when broker shut down{html} | + | shutdownOnMasterFailure | _boolean_ | {html}{html} | + | shutdownOnSlaveFailure | _java.lang.String_ | {html}{html} | + | splitSystemUsageForProducersConsumers | _boolean_ | {html}{html} | + | sslContext | _[org.apache.activemq.broker.SslContext|#org.apache.activemq.broker.SslContext-types]_ | {html}{html} | + | startAsync | _boolean_ | {html}{html} | + | supportFailOver | _boolean_ | {html}{html} | + | systemExitOnShutdown | _java.lang.String_ | {html}{html} | + | systemExitOnShutdownExitCode | _int_ | {html}{html} | + | systemUsage | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | taskRunnerPriority | _int_ | {html}{html} | + | tempDataStore | 
_[org.apache.activemq.store.kahadb.plist.PListStore|#org.apache.activemq.store.kahadb.plist.PListStore-types]_ | {html}{html} | + | timeBeforePurgeTempDestinations | _int_ | {html}{html} | + | tmpDataDirectory | _java.io.File_ | {html}{html} | + | transportConnectorURIs | (_java.lang.String_)\* | {html}{html} | + | transportConnectors | (_[org.apache.activemq.broker.TransportConnector|#org.apache.activemq.broker.TransportConnector-types]_)\* | {html}Sets the transport connectors which this broker will listen on for new +clients{html} | + | useAuthenticatedPrincipalForJMSXUserID | _boolean_ | {html}{html} | + | useJmx | _java.lang.String_ | {html}Sets whether or not the Broker's services should be exposed into JMX or +not.{html} | + | useLocalHostBrokerName | _boolean_ | {html}{html} | + | useLoggingForShutdownErrors | _boolean_ | {html}Sets whether or not we should use commons-logging when reporting errors +when shutting down the broker{html} | + | useMirroredQueues | _boolean_ | {html}Sets whether or not Mirrored +Queues should be supported by default if they have not been +explicitly configured.{html} | + | useShutdownHook | _boolean_ | {html}Sets whether or not we should use a shutdown handler to close down the +broker cleanly if the JVM is terminated. It is recommended you leave this +enabled.{html} | + | useTempMirroredQueues | _boolean_ | {html}{html} | + | useVirtualTopics | _boolean_ | {html}Sets whether or not Virtual +Topics should be supported by default if they have not been +explicitly configured.{html} | + | vmConnectorURI | _java.net.URI_ | {html}{html} | + | waitForSlave | _java.lang.String_ | {html}{html} | + | waitForSlaveTimeout | _long_ | {html}{html} | + +{anchor:bytesJDBCAdapter-element} +h3. The _[|#bytesJDBCAdapter-element]_ Element + {html}This JDBCAdapter inserts and extracts BLOB data using the +setBytes()/getBytes() operations. The databases/JDBC drivers that use this +adapter are:{html} +h4. 
Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:cachedLDAPAuthorizationMap-element} +h3. The _[|#cachedLDAPAuthorizationMap-element]_ Element + {html}A {@link DefaultAuthorizationMap} implementation which uses LDAP to initialize and update authorization +policy.{html} +h4. Properties + || Property Name || Type || Description || + | adminPermissionGroupSearchFilter | _java.lang.String_ | {html}{html} | + | authentication | _java.lang.String_ | {html}{html} | + | authorizationEntries | (_java.lang.Object_)\* | {html}Sets the individual entries on the authorization map{html} | + | connectionPassword | _java.lang.String_ | {html}{html} | + | connectionProtocol | _java.lang.String_ | {html}{html} | + | connectionURL | _java.lang.String_ | {html}{html} | + | connectionUsername | _java.lang.String_ | {html}{html} | + | defaultEntry | _[org.apache.activemq.security.AuthorizationEntry|#org.apache.activemq.security.AuthorizationEntry-types]_ | {html}{html} | + | entries | (_java.lang.Object_)\* | {html}A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring{html} | + | groupNameAttribute | _java.lang.String_ | {html}{html} | + | groupObjectClass | _java.lang.String_ | {html}{html} | + | legacyGroupMapping | _boolean_ | {html}{html} | + | permissionGroupMemberAttribute | _java.lang.String_ | {html}{html} | + | queueSearchBase | _java.lang.String_ | {html}{html} | + | readPermissionGroupSearchFilter | _java.lang.String_ | {html}{html} | + | refreshDisabled | _boolean_ | {html}{html} | + | refreshInterval | _int_ | {html}{html} | + | tempDestinationAuthorizationEntry | 
_[org.apache.activemq.security.TempDestinationAuthorizationEntry|#org.apache.activemq.security.TempDestinationAuthorizationEntry-types]_ | {html}{html} | + | tempSearchBase | _java.lang.String_ | {html}{html} | + | topicSearchBase | _java.lang.String_ | {html}{html} | + | userNameAttribute | _java.lang.String_ | {html}{html} | + | userObjectClass | _java.lang.String_ | {html}{html} | + | writePermissionGroupSearchFilter | _java.lang.String_ | {html}{html} | + +{anchor:commandAgent-element} +h3. The _[|#commandAgent-element]_ Element + {html}An agent which listens to commands on a JMS destination{html} +h4. Properties + || Property Name || Type || Description || + | brokerUrl | _java.lang.String_ | {html}{html} | + | commandDestination | _[javax.jms.Destination|#javax.jms.Destination-types]_ | {html}{html} | + | connection | _javax.jms.Connection_ | {html}{html} | + | connectionFactory | _[javax.jms.ConnectionFactory|#javax.jms.ConnectionFactory-types]_ | {html}{html} | + | password | _java.lang.String_ | {html}{html} | + | username | _java.lang.String_ | {html}{html} | + +{anchor:compositeDemandForwardingBridge-element} +h3. The _[|#compositeDemandForwardingBridge-element]_ Element + {html}A demand forwarding bridge which works with multicast style transports where +a single Transport could be communicating with multiple remote brokers{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | configuration | _[org.apache.activemq.network.NetworkBridgeConfiguration|#org.apache.activemq.network.NetworkBridgeConfiguration-types]_ | {html}{html} | + | createdByDuplex | _boolean_ | {html}{html} | + | durableDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | dynamicallyIncludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | excludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | localBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | mbeanObjectName | _javax.management.ObjectName_ | {html}{html} | + | networkBridgeListener | _org.apache.activemq.network.NetworkBridgeListener_ | {html}{html} | + | remoteBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | staticallyIncludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + +{anchor:compositeQueue-element} +h3. The _[|#compositeQueue-element]_ Element + {html}Represents a virtual queue which forwards to a number of other destinations.{html} +h4. Properties + || Property Name || Type || Description || + | copyMessage | _boolean_ | {html}Sets whether a copy of the message will be sent to each destination. 
+Defaults to true so that the forward destination is set as the +destination of the message{html} | + | forwardOnly | _boolean_ | {html}Sets if the virtual destination is forward only (and so there is no +physical queue to match the virtual queue) or if there is also a physical +queue with the same name).{html} | + | forwardTo | (_java.lang.Object_)\* | {html}Sets the list of destinations to forward to{html} | + | name | _java.lang.String_ | {html}Sets the name of this composite destination{html} | + +{anchor:compositeTopic-element} +h3. The _[|#compositeTopic-element]_ Element + {html}Represents a virtual topic which forwards to a number of other destinations.{html} +h4. Properties + || Property Name || Type || Description || + | copyMessage | _boolean_ | {html}Sets whether a copy of the message will be sent to each destination. +Defaults to true so that the forward destination is set as the +destination of the message{html} | + | forwardOnly | _boolean_ | {html}Sets if the virtual destination is forward only (and so there is no +physical queue to match the virtual queue) or if there is also a physical +queue with the same name).{html} | + | forwardTo | (_java.lang.Object_)\* | {html}Sets the list of destinations to forward to{html} | + | name | _java.lang.String_ | {html}Sets the name of this composite destination{html} | + +{anchor:conditionalNetworkBridgeFilterFactory-element} +h3. The _[|#conditionalNetworkBridgeFilterFactory-element]_ Element + {html}implement conditional behaviour for queue consumers, +allows replaying back to origin if no consumers are present on the local broker +after a configurable delay, irrespective of the networkTTL +Also allows rate limiting of messages through the network, useful for static includes{html} +h4. 
Properties + || Property Name || Type || Description || + | rateDuration | _int_ | {html}{html} | + | rateLimit | _int_ | {html}{html} | + | replayDelay | _int_ | {html}{html} | + | replayWhenNoConsumers | _boolean_ | {html}{html} | + +{anchor:connectionDotFilePlugin-element} +h3. The _[|#connectionDotFilePlugin-element]_ Element + {html}A DOT file creator plugin which +creates a DOT file showing the current connections{html} +h4. Properties + || Property Name || Type || Description || + | file | _java.lang.String_ | {html}Sets the destination file name to create the destination diagram{html} | + +{anchor:connectionFactory-element} +h3. The _[|#connectionFactory-element]_ Element + {html}A Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} +h4. Properties + || Property Name || Type || Description || + | alwaysSessionAsync | _boolean_ | {html}If this flag is not set then a separate thread is not used for dispatching messages for each Session in +the Connection. However, a separate thread is always used if there is more than one session, or the session +isn't in auto acknowledge or duplicates ok mode. 
By default this value is set to true and session dispatch +happens asynchronously.{html} | + | alwaysSyncSend | _boolean_ | {html}Set true if always require messages to be sync sent{html} | + | auditDepth | _int_ | {html}{html} | + | auditMaximumProducerNumber | _int_ | {html}{html} | + | beanName | _java.lang.String_ | {html}{html} | + | blobTransferPolicy | _org.apache.activemq.blob.BlobTransferPolicy_ | {html}Sets the policy used to describe how out-of-band BLOBs (Binary Large +OBjects) are transferred from producers to brokers to consumers{html} | + | brokerURL | _java.lang.String_ | {html}Sets the connection +URL used to connect to the ActiveMQ broker.{html} | + | checkForDuplicates | _boolean_ | {html}{html} | + | clientID | _java.lang.String_ | {html}Sets the JMS clientID to use for the created connection. Note that this +can only be used by one connection at once so generally its a better idea +to set the clientID on a Connection{html} | + | clientIDPrefix | _java.lang.String_ | {html}Sets the prefix used by autogenerated JMS Client ID values which are used +if the JMS client does not explicitly specify on.{html} | + | clientIdGenerator | _org.apache.activemq.util.IdGenerator_ | {html}{html} | + | clientInternalExceptionListener | _org.apache.activemq.ClientInternalExceptionListener_ | {html}Allows an {@link ClientInternalExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory){html} | + | closeTimeout | _int_ | {html}Sets the timeout before a close is considered complete. Normally a +close() on a connection waits for confirmation from the broker; this +allows that operation to timeout to save the client hanging if there is +no broker{html} | + | connectionIDPrefix | _java.lang.String_ | {html}Sets the prefix used by connection id generator{html} | + | connectionIdGenerator | _org.apache.activemq.util.IdGenerator_ | {html}{html} | + | consumerFailoverRedeliveryWaitPeriod | _long_ | {html}{html} | + | copyMessageOnSend | _boolean_ | {html}Should a JMS message be copied to a new JMS Message object as part of the +send() method in JMS. This is enabled by default to be compliant with the +JMS specification. You can disable it if you do not mutate JMS messages +after they are sent for a performance boost{html} | + | disableTimeStampsByDefault | _boolean_ | {html}Sets whether or not timestamps on messages should be disabled or not. If +you disable them it adds a small performance boost.{html} | + | dispatchAsync | _boolean_ | {html}Enables or disables the default setting of whether or not consumers have +their messages dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. 
However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers.{html} | + | exceptionListener | _[javax.jms.ExceptionListener|#javax.jms.ExceptionListener-types]_ | {html}Allows an {@link ExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this exceptionLinstener will not be serialized if it is associated with more than +on connection (as it will be if more than one connection is subsequently created by this connection factory){html} | + | exclusiveConsumer | _boolean_ | {html}Enables or disables whether or not queue consumers should be exclusive or +not for example to preserve ordering when not using Message Groups{html} | + | maxThreadPoolSize | _int_ | {html}{html} | + | messagePrioritySupported | _boolean_ | {html}{html} | + | nestedMapAndListEnabled | _boolean_ | {html}Enables/disables whether or not Message properties and MapMessage entries +support Nested +Structures of Map and List objects{html} | + | nonBlockingRedelivery | _boolean_ | {html}When true a MessageConsumer will not stop Message delivery before re-delivering Messages +from a rolled back transaction. This implies that message order will not be preserved and +also will result in the TransactedIndividualAck option to be enabled.{html} | + | objectMessageSerializationDefered | _boolean_ | {html}When an object is set on an ObjectMessage, the JMS spec requires the +object to be serialized by that set method. Enabling this flag causes the +object to not get serialized. The object may subsequently get serialized +if the message needs to be sent over a socket or stored to disk.{html} | + | optimizeAcknowledge | _boolean_ | {html}{html} | + | optimizeAcknowledgeTimeOut | _long_ | {html}The max time in milliseconds between optimized ack batches{html} | + | optimizedAckScheduledAckInterval | _long_ | {html}Gets the configured time interval that is used to force all MessageConsumers that have optimizedAcknowledge enabled +to send an ack for any outstanding Message Acks. 
By default this value is set to zero meaning that the consumers +will not do any background Message acknowledgment.{html} | + | optimizedMessageDispatch | _boolean_ | {html}If this flag is set then an larger prefetch limit is used - only +applicable for durable topic subscribers.{html} | + | password | _java.lang.String_ | {html}Sets the JMS password used for connections created from this factory{html} | + | prefetchPolicy | _[org.apache.activemq.ActiveMQPrefetchPolicy|#org.apache.activemq.ActiveMQPrefetchPolicy-types]_ | {html}Sets the prefetch +policy for consumers created by this connection.{html} | + | producerWindowSize | _int_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + | redeliveryPolicy | _[org.apache.activemq.RedeliveryPolicy|#org.apache.activemq.RedeliveryPolicy-types]_ | {html}Sets the global default redelivery policy to be used when a message is delivered +but the session is rolled back{html} | + | redeliveryPolicyMap | _[org.apache.activemq.broker.region.policy.RedeliveryPolicyMap|#org.apache.activemq.broker.region.policy.RedeliveryPolicyMap-types]_ | {html}Sets the global redelivery policy mapping to be used when a message is delivered +but the session is rolled back{html} | + | rejectedTaskHandler | _java.util.concurrent.RejectedExecutionHandler_ | {html}{html} | + | sendAcksAsync | _boolean_ | {html}{html} | + | sendTimeout | _int_ | {html}{html} | + | sessionTaskRunner | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | statsEnabled | _boolean_ | {html}{html} | + | transactedIndividualAck | _boolean_ | {html}when true, submit individual transacted acks immediately rather than with transaction completion. 
+This allows the acks to represent delivery status which can be persisted on rollback +Used in conjunction with org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter#setRewriteOnRedelivery(boolean) true{html} | + | transformer | _org.apache.activemq.MessageTransformer_ | {html}Sets the transformer used to transform messages before they are sent on +to the JMS bus or when they are received from the bus but before they are +delivered to the JMS client{html} | + | transportListener | _org.apache.activemq.transport.TransportListener_ | {html}Allows a listener to be configured on the ConnectionFactory so that when this factory is used +with frameworks which don't expose the Connection such as Spring JmsTemplate, you can still register +a transport listener.{html} | + | useAsyncSend | _boolean_ | {html}Forces the use of Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss.{html} | + | useBeanNameAsClientIdPrefix | _boolean_ | {html}{html} | + | useCompression | _boolean_ | {html}Enables the use of compression of the message bodies{html} | + | useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useRetroactiveConsumer | _boolean_ | {html}Sets whether or not retroactive consumers are enabled. Retroactive +consumers allow non-durable topic subscribers to receive old messages +that were published before the non-durable subscriber started.{html} | + | userName | _java.lang.String_ | {html}Sets the JMS userName used by connections created by this factory{html} | + | warnAboutUnstartedConnectionTimeout | _long_ | {html}Enables the timeout from a connection creation to when a warning is +generated if the connection is not properly started via +{@link Connection#start()} and a message is received by a consumer. 
It is +a very common gotcha to forget to start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1).{html} | + | watchTopicAdvisories | _boolean_ | {html}{html} | + +{anchor:constantPendingMessageLimitStrategy-element} +h3. The _[|#constantPendingMessageLimitStrategy-element]_ Element + {html}This PendingMessageLimitStrategy is configured to a constant value for all subscriptions.{html} +h4. Properties + || Property Name || Type || Description || + | limit | _int_ | {html}{html} | + +{anchor:database-locker-element} +h3. The _[|#database-locker-element]_ Element + {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} +h4. Properties + || Property Name || Type || Description || + | exceptionHandler | _org.apache.activemq.util.Handler_ | {html}{html} | + | failIfLocked | _boolean_ | {html}{html} | + | lockAcquireSleepInterval | _long_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | queryTimeout | _int_ | {html}{html} | + +{anchor:db2JDBCAdapter-element} +h3. The _[|#db2JDBCAdapter-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:defaultIOExceptionHandler-element} +h3. The _[|#defaultIOExceptionHandler-element]_ Element + {html}{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | ignoreAllErrors | _boolean_ | {html}{html} | + | ignoreNoSpaceErrors | _boolean_ | {html}{html} | + | ignoreSQLExceptions | _boolean_ | {html}{html} | + | noSpaceMessage | _java.lang.String_ | {html}{html} | + | resumeCheckSleepPeriod | _long_ | {html}{html} | + | sqlExceptionMessage | _java.lang.String_ | {html}{html} | + | stopStartConnectors | _boolean_ | {html}{html} | + +{anchor:defaultJDBCAdapter-element} +h3. The _[|#defaultJDBCAdapter-element]_ Element + {html}Implements all the default JDBC operations that are used by the JDBCPersistenceAdapter.

    sub-classing is +encouraged to override the default implementation of methods to account for differences in JDBC Driver +implementations.

    The JDBCAdapter inserts and extracts BLOB data using the getBytes()/setBytes() operations.

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:defaultNetworkBridgeFilterFactory-element} +h3. The _[|#defaultNetworkBridgeFilterFactory-element]_ Element + {html}implement default behaviour, filter that will not allow resend to origin +based on brokerPath and which respects networkTTL{html} + +{anchor:defaultUsageCapacity-element} +h3. The _[|#defaultUsageCapacity-element]_ Element + {html}Identify if a limit has been reached{html} +h4. Properties + || Property Name || Type || Description || + | limit | _long_ | {html}{html} | + +{anchor:demandForwardingBridge-element} +h3. The _[|#demandForwardingBridge-element]_ Element + {html}Forwards messages from the local broker to the remote broker based on demand.{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | configuration | _[org.apache.activemq.network.NetworkBridgeConfiguration|#org.apache.activemq.network.NetworkBridgeConfiguration-types]_ | {html}{html} | + | createdByDuplex | _boolean_ | {html}{html} | + | durableDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | dynamicallyIncludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | excludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | localBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | mbeanObjectName | _javax.management.ObjectName_ | {html}{html} | + | networkBridgeListener | _org.apache.activemq.network.NetworkBridgeListener_ | {html}{html} | + | remoteBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | staticallyIncludedDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + +{anchor:destinationDotFilePlugin-element} +h3. The _[|#destinationDotFilePlugin-element]_ Element + {html}A DOT +file creator plugin which creates a DOT file showing the current topic & queue hierarchies.{html} +h4. Properties + || Property Name || Type || Description || + | file | _java.lang.String_ | {html}Sets the destination file name to create the destination diagram{html} | + +{anchor:destinationEntry-element} +h3. The _[|#destinationEntry-element]_ Element + {html}A default entry in a DestinationMap which holds a single value.{html} +h4. 
Properties + || Property Name || Type || Description || + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | value | _[org.apache.activemq.filter.DestinationMapEntry|#org.apache.activemq.filter.DestinationMapEntry-types]_ | {html}{html} | + +{anchor:destinationPathSeparatorPlugin-element} +h3. The _[|#destinationPathSeparatorPlugin-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | pathSeparator | _java.lang.String_ | {html}{html} | + +{anchor:discardingDLQBrokerPlugin-element} +h3. The _[|#discardingDLQBrokerPlugin-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | dropAll | _boolean_ | {html}{html} | + | dropOnly | _java.lang.String_ | {html}{html} | + | dropTemporaryQueues | _boolean_ | {html}{html} | + | dropTemporaryTopics | _boolean_ | {html}{html} | + | reportInterval | _int_ | {html}{html} | + +{anchor:fileCursor-element} +h3. The _[|#fileCursor-element]_ Element + {html}Pending messages{html} + +{anchor:fileDurableSubscriberCursor-element} +h3. The _[|#fileDurableSubscriberCursor-element]_ Element + {html}Pending messages for durable subscribers{html} + +{anchor:fileQueueCursor-element} +h3. The _[|#fileQueueCursor-element]_ Element + {html}Pending{html} + +{anchor:filteredDestination-element} +h3. 
The _[|#filteredDestination-element]_ Element + {html}Represents a destination which is filtered using some predicate such as a selector +so that messages are only dispatched to the destination if they match the filter.{html} +h4. Properties + || Property Name || Type || Description || + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}The destination to send messages to if they match the filter{html} | + | filter | _org.apache.activemq.filter.BooleanExpression_ | {html}{html} | + | queue | _java.lang.String_ | {html}Sets the destination property to the given queue name{html} | + | selector | _java.lang.String_ | {html}Sets the JMS selector used to filter messages before forwarding them to this destination{html} | + | topic | _java.lang.String_ | {html}Sets the destination property to the given topic name{html} | + +{anchor:filteredKahaDB-element} +h3. The _[|#filteredKahaDB-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | adapter | _[org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter|#org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter-types]_ | {html}{html} | + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | perDestination | _boolean_ | {html}{html} | + | persistenceAdapter | _[org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter|#org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter-types]_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + +{anchor:fixedCountSubscriptionRecoveryPolicy-element} +h3. 
The _[|#fixedCountSubscriptionRecoveryPolicy-element]_ Element + {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +count of last messages.{html} +h4. Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | maximumSize | _int_ | {html}Sets the maximum number of messages that this destination will hold +around in RAM{html} | + +{anchor:fixedSizedSubscriptionRecoveryPolicy-element} +h3. The _[|#fixedSizedSubscriptionRecoveryPolicy-element]_ Element + {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +amount of memory available in RAM for message history which is evicted in +time order.{html} +h4. Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | buffer | _org.apache.activemq.memory.list.MessageList_ | {html}{html} | + | maximumSize | _int_ | {html}Sets the maximum amount of RAM in bytes that this buffer can hold in RAM{html} | + | useSharedBuffer | _boolean_ | {html}{html} | + +{anchor:forcePersistencyModeBroker-element} +h3. The _[|#forcePersistencyModeBroker-element]_ Element + {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} +h4. Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | persistenceFlag | _boolean_ | {html}{html} | + +{anchor:forcePersistencyModeBrokerPlugin-element} +h3. 
The _[|#forcePersistencyModeBrokerPlugin-element]_ Element + {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} +h4. Properties + || Property Name || Type || Description || + | persistenceFlag | _boolean_ | {html}Sets the persistency mode.{html} | + +{anchor:forwardingBridge-element} +h3. The _[|#forwardingBridge-element]_ Element + {html}Forwards all messages from the local broker to the remote broker.{html} +h4. Properties + || Property Name || Type || Description || + | clientId | _java.lang.String_ | {html}{html} | + | destinationFilter | _java.lang.String_ | {html}{html} | + | dispatchAsync | _boolean_ | {html}{html} | + | localBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | networkBridgeFailedListener | _org.apache.activemq.network.NetworkBridgeListener_ | {html}{html} | + | prefetchSize | _int_ | {html}{html} | + | remoteBroker | _org.apache.activemq.transport.Transport_ | {html}{html} | + | useCompression | _boolean_ | {html}{html} | + +{anchor:hsqldb-jdbc-adapter-element} +h3. The _[|#hsqldb-jdbc-adapter-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:imageBasedJDBCAdaptor-element} +h3. The _[|#imageBasedJDBCAdaptor-element]_ Element + {html}Provides JDBCAdapter since that uses +IMAGE datatype to hold binary data. + +The databases/JDBC drivers that use this adapter are: +
<ul>
<li>Sybase</li>
<li>MS SQL</li>
</ul>
    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:inboundQueueBridge-element} +h3. The _[|#inboundQueueBridge-element]_ Element + {html}Create an Inbound Queue Bridge. By default this class uses the sname name for +both the inbound and outbound queue. This behavior can be overridden however +by using the setter methods to configure both the inbound and outboud queue names +separately.{html} +h4. Properties + || Property Name || Type || Description || + | consumer | _javax.jms.MessageConsumer_ | {html}{html} | + | consumerConnection | _javax.jms.QueueConnection_ | {html}{html} | + | consumerQueue | _[javax.jms.Queue|#javax.jms.Queue-types]_ | {html}{html} | + | doHandleReplyTo | _boolean_ | {html}{html} | + | inboundQueueName | _java.lang.String_ | {html}Sets the queue name used for the inbound queue, if the outbound queue +name has not been set, then this method uses the same name to configure +the outbound queue name.{html} | + | jmsConnector | _[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_ | {html}{html} | + | jmsMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | localQueueName | _java.lang.String_ | {html}{html} | + | producerConnection | _javax.jms.QueueConnection_ | {html}{html} | + | producerQueue | _[javax.jms.Queue|#javax.jms.Queue-types]_ | {html}{html} | + | selector | _java.lang.String_ | {html}{html} | + +{anchor:inboundTopicBridge-element} +h3. The _[|#inboundTopicBridge-element]_ Element + {html}Create an Inbound Topic Bridge. 
By default this class uses the topic name for +both the inbound and outbound topic. This behavior can be overridden however +by using the setter methods to configure both the inbound and outboud topic names +separately.{html} +h4. Properties + || Property Name || Type || Description || + | consumer | _javax.jms.MessageConsumer_ | {html}{html} | + | consumerConnection | _javax.jms.TopicConnection_ | {html}{html} | + | consumerName | _java.lang.String_ | {html}{html} | + | consumerTopic | _[javax.jms.Topic|#javax.jms.Topic-types]_ | {html}{html} | + | doHandleReplyTo | _boolean_ | {html}{html} | + | inboundTopicName | _java.lang.String_ | {html}Sets the topic name used for the inbound topic, if the outbound topic +name has not been set, then this method uses the same name to configure +the outbound topic name.{html} | + | jmsConnector | _[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_ | {html}{html} | + | jmsMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | localTopicName | _java.lang.String_ | {html}{html} | + | producerConnection | _javax.jms.TopicConnection_ | {html}{html} | + | producerTopic | _[javax.jms.Topic|#javax.jms.Topic-types]_ | {html}{html} | + | selector | _java.lang.String_ | {html}{html} | + +{anchor:individualDeadLetterStrategy-element} +h3. The _[|#individualDeadLetterStrategy-element]_ Element + {html}A {@link DeadLetterStrategy} where each destination has its own individual +DLQ using the subject naming hierarchy.{html} +h4. Properties + || Property Name || Type || Description || + | destinationPerDurableSubscriber | _boolean_ | {html}sets whether durable topic subscriptions are to get individual dead letter destinations. 
+When true, the DLQ is of the form 'topicPrefix.clientId:subscriptionName' +The default is false.{html} | + | enableAudit | _boolean_ | {html}{html} | + | processExpired | _boolean_ | {html}{html} | + | processNonPersistent | _boolean_ | {html}{html} | + | queuePrefix | _java.lang.String_ | {html}Sets the prefix to use for all dead letter queues for queue messages{html} | + | queueSuffix | _java.lang.String_ | {html}Sets the suffix to use for all dead letter queues for queue messages{html} | + | topicPrefix | _java.lang.String_ | {html}Sets the prefix to use for all dead letter queues for topic messages{html} | + | topicSuffix | _java.lang.String_ | {html}Sets the suffix to use for all dead letter queues for topic messages{html} | + | useQueueForQueueMessages | _boolean_ | {html}Sets whether a queue or topic should be used for queue messages sent to a +DLQ. The default is to use a Queue{html} | + | useQueueForTopicMessages | _boolean_ | {html}Sets whether a queue or topic should be used for topic messages sent to a +DLQ. The default is to use a Queue{html} | + +{anchor:informixJDBCAdapter-element} +h3. The _[|#informixJDBCAdapter-element]_ Element + {html}JDBC Adapter for Informix database. +Because Informix database restricts length of composite primary keys, length of +container name field and subscription id field must be reduced to 150 characters. +Therefore be sure not to use longer names for container name and subscription id than 150 characters.{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:jDBCIOExceptionHandler-element} +h3. The _[|#jDBCIOExceptionHandler-element]_ Element + {html}{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | ignoreAllErrors | _boolean_ | {html}{html} | + | ignoreNoSpaceErrors | _boolean_ | {html}{html} | + | ignoreSQLExceptions | _boolean_ | {html}{html} | + | noSpaceMessage | _java.lang.String_ | {html}{html} | + | resumeCheckSleepPeriod | _long_ | {html}{html} | + | sqlExceptionMessage | _java.lang.String_ | {html}{html} | + | stopStartConnectors | _boolean_ | {html}{html} | + +{anchor:jaasAuthenticationPlugin-element} +h3. The _[|#jaasAuthenticationPlugin-element]_ Element + {html}Provides a JAAS based authentication plugin{html} +h4. Properties + || Property Name || Type || Description || + | configuration | _java.lang.String_ | {html}Sets the JAAS configuration domain name used{html} | + | discoverLoginConfig | _boolean_ | {html}Enables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.{html} | + +{anchor:jaasCertificateAuthenticationPlugin-element} +h3. The _[|#jaasCertificateAuthenticationPlugin-element]_ Element + {html}Provides a JAAS based SSL certificate authentication plugin{html} +h4. Properties + || Property Name || Type || Description || + | configuration | _java.lang.String_ | {html}Sets the JAAS configuration domain name used{html} | + | discoverLoginConfig | _boolean_ | {html}Enables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.{html} | + +{anchor:jaasDualAuthenticationPlugin-element} +h3. 
The _[|#jaasDualAuthenticationPlugin-element]_ Element + {html}Provides a JAAS based authentication plugin{html} +h4. Properties + || Property Name || Type || Description || + | configuration | _java.lang.String_ | {html}Sets the JAAS configuration domain name used{html} | + | discoverLoginConfig | _boolean_ | {html}Enables or disables the auto-discovery of the login.config file for JAAS to initialize itself. +This flag is enabled by default such that if the java.security.auth.login.config system property +is not defined then it is set to the location of the login.config file on the classpath.{html} | + | sslConfiguration | _java.lang.String_ | {html}Set the JAAS SSL configuration domain{html} | + +{anchor:jdbcPersistenceAdapter-element} +h3. The _[|#jdbcPersistenceAdapter-element]_ Element + {html}A {@link PersistenceAdapter} implementation using JDBC for persistence +storage. + +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.{html} +h4. 
Properties + || Property Name || Type || Description || + | adapter | _[org.apache.activemq.store.jdbc.JDBCAdapter|#org.apache.activemq.store.jdbc.JDBCAdapter-types]_ | {html}{html} | + | auditRecoveryDepth | _int_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | cleanupPeriod | _int_ | {html}Sets the number of milliseconds until the database is attempted to be +cleaned up for durable topics{html} | + | createTablesOnStartup | _boolean_ | {html}Sets whether or not tables are created on startup{html} | + | dataDirectory | _java.lang.String_ | {html}{html} | + | dataDirectoryFile | _java.io.File_ | {html}{html} | + | dataSource | _javax.sql.DataSource_ | {html}{html} | + | databaseLocker | _[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ | {html}Sets the database locker strategy to use to lock the database on startup{html} | + | directory | _java.io.File_ | {html}{html} | + | ds | _javax.sql.DataSource_ | {html}{html} | + | enableAudit | _boolean_ | {html}{html} | + | lockAcquireSleepInterval | _long_ | {html}{html} | + | lockDataSource | _javax.sql.DataSource_ | {html}{html} | + | lockKeepAlivePeriod | _long_ | {html}{html} | + | locker | _[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ | {html}{html} | + | maxAuditDepth | _int_ | {html}{html} | + | maxProducersToAudit | _int_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | scheduledThreadPoolExecutor | _java.util.concurrent.ScheduledThreadPoolExecutor_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | transactionIsolation | _int_ | {html}set the Transaction isolation level to something other that TRANSACTION_READ_UNCOMMITTED +This allowable dirty isolation level may not be achievable in clustered 
DB environments +so a more restrictive and expensive option may be needed like TRANSACTION_REPEATABLE_READ +see isolation level constants in {@link java.sql.Connection}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | useDatabaseLock | _boolean_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + | useLock | _boolean_ | {html}{html} | + | wireFormat | _org.apache.activemq.wireformat.WireFormat_ | {html}{html} | + +{anchor:jmsQueueConnector-element} +h3. The _[|#jmsQueueConnector-element]_ Element + {html}A Bridge to other JMS Queue providers{html} +h4. Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}One way to configure the local connection - this is called by The +BrokerService when the Connector is embedded{html} | + | inboundMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | inboundQueueBridges | (_[org.apache.activemq.network.jms.InboundQueueBridge|#org.apache.activemq.network.jms.InboundQueueBridge-types]_)\* | {html}{html} | + | jndiLocalTemplate | _org.springframework.jndi.JndiTemplate_ | {html}{html} | + | jndiOutboundTemplate | _org.springframework.jndi.JndiTemplate_ | {html}{html} | + | localClientId | _java.lang.String_ | {html}{html} | + | localConnectionFactoryName | _java.lang.String_ | {html}{html} | + | localPassword | _java.lang.String_ | {html}{html} | + | localQueueConnection | _javax.jms.QueueConnection_ | {html}{html} | + | localQueueConnectionFactory | _[javax.jms.QueueConnectionFactory|#javax.jms.QueueConnectionFactory-types]_ | {html}{html} | + | localUsername | _java.lang.String_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | outboundClientId | _java.lang.String_ | {html}{html} | 
+ | outboundMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | outboundPassword | _java.lang.String_ | {html}{html} | + | outboundQueueBridges | (_[org.apache.activemq.network.jms.OutboundQueueBridge|#org.apache.activemq.network.jms.OutboundQueueBridge-types]_)\* | {html}{html} | + | outboundQueueConnection | _javax.jms.QueueConnection_ | {html}{html} | + | outboundQueueConnectionFactory | _[javax.jms.QueueConnectionFactory|#javax.jms.QueueConnectionFactory-types]_ | {html}{html} | + | outboundQueueConnectionFactoryName | _java.lang.String_ | {html}{html} | + | outboundUsername | _java.lang.String_ | {html}{html} | + | preferJndiDestinationLookup | _boolean_ | {html}Sets whether the connector should prefer to first try to find a destination in JNDI before +using JMS semantics to create a Destination. By default the connector will first use JMS +semantics and then fall-back to JNDI lookup, setting this value to true will reverse that +ordering.{html} | + | reconnectionPolicy | _[org.apache.activemq.network.jms.ReconnectionPolicy|#org.apache.activemq.network.jms.ReconnectionPolicy-types]_ | {html}{html} | + | replyToDestinationCacheSize | _int_ | {html}{html} | + +{anchor:jmsTopicConnector-element} +h3. The _[|#jmsTopicConnector-element]_ Element + {html}A Bridge to other JMS Topic providers{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}One way to configure the local connection - this is called by The +BrokerService when the Connector is embedded{html} | + | inboundMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | inboundTopicBridges | (_[org.apache.activemq.network.jms.InboundTopicBridge|#org.apache.activemq.network.jms.InboundTopicBridge-types]_)\* | {html}{html} | + | jndiLocalTemplate | _org.springframework.jndi.JndiTemplate_ | {html}{html} | + | jndiOutboundTemplate | _org.springframework.jndi.JndiTemplate_ | {html}{html} | + | localClientId | _java.lang.String_ | {html}{html} | + | localConnectionFactoryName | _java.lang.String_ | {html}{html} | + | localPassword | _java.lang.String_ | {html}{html} | + | localTopicConnection | _javax.jms.TopicConnection_ | {html}{html} | + | localTopicConnectionFactory | _[javax.jms.TopicConnectionFactory|#javax.jms.TopicConnectionFactory-types]_ | {html}{html} | + | localUsername | _java.lang.String_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | outboundClientId | _java.lang.String_ | {html}{html} | + | outboundMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | outboundPassword | _java.lang.String_ | {html}{html} | + | outboundTopicBridges | (_[org.apache.activemq.network.jms.OutboundTopicBridge|#org.apache.activemq.network.jms.OutboundTopicBridge-types]_)\* | {html}{html} | + | outboundTopicConnection | _javax.jms.TopicConnection_ | {html}{html} | + | outboundTopicConnectionFactory | _[javax.jms.TopicConnectionFactory|#javax.jms.TopicConnectionFactory-types]_ | {html}{html} | + | outboundTopicConnectionFactoryName | _java.lang.String_ | {html}{html} | + | 
outboundUsername | _java.lang.String_ | {html}{html} | + | preferJndiDestinationLookup | _boolean_ | {html}Sets whether the connector should prefer to first try to find a destination in JNDI before +using JMS semantics to create a Destination. By default the connector will first use JMS +semantics and then fall-back to JNDI lookup, setting this value to true will reverse that +ordering.{html} | + | reconnectionPolicy | _[org.apache.activemq.network.jms.ReconnectionPolicy|#org.apache.activemq.network.jms.ReconnectionPolicy-types]_ | {html}{html} | + | replyToDestinationCacheSize | _int_ | {html}{html} | + +{anchor:journalPersistenceAdapter-element} +h3. The _[|#journalPersistenceAdapter-element]_ Element + {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} +h4. Properties + || Property Name || Type || Description || + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | journal | _org.apache.activeio.journal.Journal_ | {html}{html} | + | longTermPersistence | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}{html} | + | maxCheckpointMessageAddSize | _int_ | {html}{html} | + | maxCheckpointWorkers | _int_ | {html}{html} | + | persistenceAdapter | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + 
+{anchor:journalPersistenceAdapterFactory-element} +h3. The _[|#journalPersistenceAdapterFactory-element]_ Element + {html}Factory class that can create PersistenceAdapter objects.{html} +h4. Properties + || Property Name || Type || Description || + | adapter | _[org.apache.activemq.store.jdbc.JDBCAdapter|#org.apache.activemq.store.jdbc.JDBCAdapter-types]_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | createTablesOnStartup | _boolean_ | {html}Sets whether or not tables are created on startup{html} | + | dataDirectory | _java.lang.String_ | {html}{html} | + | dataDirectoryFile | _java.io.File_ | {html}{html} | + | dataSource | _javax.sql.DataSource_ | {html}{html} | + | jdbcAdapter | _[org.apache.activemq.store.jdbc.JDBCPersistenceAdapter|#org.apache.activemq.store.jdbc.JDBCPersistenceAdapter-types]_ | {html}{html} | + | journal | _org.apache.activeio.journal.Journal_ | {html}{html} | + | journalArchiveDirectory | _java.io.File_ | {html}{html} | + | journalLogFileSize | _java.lang.String_ | {html}Sets the size of the journal log files +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | journalLogFiles | _int_ | {html}Sets the number of journal log files to use{html} | + | journalThreadPriority | _int_ | {html}Sets the thread priority of the journal thread{html} | + | lockKeepAlivePeriod | _long_ | {html}{html} | + | locker | _[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | useDatabaseLock | _boolean_ | {html}Sets whether or not an exclusive database lock should be used to enable +JDBC Master/Slave. 
Enabled by default.{html} | + | useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useJournal | _boolean_ | {html}Enables or disables the use of the journal. The default is to use the +journal{html} | + | useLock | _boolean_ | {html}{html} | + | useQuickJournal | _boolean_ | {html}Enables or disables the use of quick journal, which keeps messages in the +journal and just stores a reference to the messages in JDBC. Defaults to +false so that messages actually reside long term in the JDBC database.{html} | + +{anchor:journaledJDBC-element} +h3. The _[|#journaledJDBC-element]_ Element + {html}Creates a default persistence model using the Journal and JDBC{html} +h4. Properties + || Property Name || Type || Description || + | adapter | _[org.apache.activemq.store.jdbc.JDBCAdapter|#org.apache.activemq.store.jdbc.JDBCAdapter-types]_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | createTablesOnStartup | _boolean_ | {html}Sets whether or not tables are created on startup{html} | + | dataDirectory | _java.lang.String_ | {html}{html} | + | dataDirectoryFile | _java.io.File_ | {html}{html} | + | dataSource | _javax.sql.DataSource_ | {html}{html} | + | jdbcAdapter | _[org.apache.activemq.store.jdbc.JDBCPersistenceAdapter|#org.apache.activemq.store.jdbc.JDBCPersistenceAdapter-types]_ | {html}{html} | + | journal | _org.apache.activeio.journal.Journal_ | {html}{html} | + | journalArchiveDirectory | _java.io.File_ | {html}{html} | + | journalLogFileSize | _java.lang.String_ | {html}Sets the size of the journal log files +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | journalLogFiles | _int_ | {html}Sets the number of journal log files to use{html} | + | journalThreadPriority | _int_ | {html}Sets the thread priority of the journal thread{html} | + | lockKeepAlivePeriod | _long_ | {html}{html} | + | locker | 
_[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | useDatabaseLock | _boolean_ | {html}Sets whether or not an exclusive database lock should be used to enable +JDBC Master/Slave. Enabled by default.{html} | + | useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useJournal | _boolean_ | {html}Enables or disables the use of the journal. The default is to use the +journal{html} | + | useLock | _boolean_ | {html}{html} | + | useQuickJournal | _boolean_ | {html}Enables or disables the use of quick journal, which keeps messages in the +journal and just stores a reference to the messages in JDBC. Defaults to +false so that messages actually reside long term in the JDBC database.{html} | + +{anchor:kahaDB-element} +h3. The _[|#kahaDB-element]_ Element + {html}An implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Database{html} +h4. 
Properties + || Property Name || Type || Description || + | archiveCorruptedIndex | _boolean_ | {html}{html} | + | archiveDataLogs | _boolean_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | checkForCorruptJournalFiles | _boolean_ | {html}{html} | + | checkpointInterval | _long_ | {html}Get the checkpointInterval{html} | + | checksumJournalFiles | _boolean_ | {html}{html} | + | cleanupInterval | _long_ | {html}Get the cleanupInterval{html} | + | concurrentStoreAndDispatchQueues | _boolean_ | {html}{html} | + | concurrentStoreAndDispatchTopics | _boolean_ | {html}{html} | + | databaseLockedWaitDelay | _int_ | {html}{html} | + | directory | _java.io.File_ | {html}Get the directory{html} | + | directoryArchive | _java.io.File_ | {html}{html} | + | enableIndexDiskSyncs | _boolean_ | {html}{html} | + | enableIndexPageCaching | _boolean_ | {html}{html} | + | enableIndexRecoveryFile | _boolean_ | {html}{html} | + | enableIndexWriteAsync | _boolean_ | {html}Get the enableIndexWriteAsync{html} | + | enableJournalDiskSyncs | _boolean_ | {html}Get the enableJournalDiskSyncs{html} | + | failoverProducersAuditDepth | _int_ | {html}set the audit window depth for duplicate suppression (should exceed the max transaction +batch){html} | + | forceRecoverIndex | _boolean_ | {html}{html} | + | ignoreMissingJournalfiles | _boolean_ | {html}Get the ignoreMissingJournalfiles{html} | + | indexCacheSize | _java.lang.String_ | {html}Get the indexCacheSize{html} | + | indexLFUEvictionFactor | _float_ | {html}{html} | + | indexWriteBatchSize | _java.lang.String_ | {html}Get the indexWriteBatchSize{html} | + | journalMaxFileLength | _java.lang.String_ | {html}Get the journalMaxFileLength{html} | + | journalMaxWriteBatchSize | _java.lang.String_ | {html}Get the journalMaxWriteBatchSize{html} | + | lockKeepAlivePeriod | _long_ | {html}{html} 
| + | locker | _[org.apache.activemq.broker.Locker|#org.apache.activemq.broker.Locker-types]_ | {html}{html} | + | maxAsyncJobs | _int_ | {html}{html} | + | maxFailoverProducersToTrack | _int_ | {html}Set the max number of producers (LRU cache) to track for duplicate sends{html} | + | rewriteOnRedelivery | _boolean_ | {html}When true, persist the redelivery status such that the message redelivery flag can survive a broker failure +used with org.apache.activemq.ActiveMQConnectionFactory#setTransactedIndividualAck(boolean) true{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | useIndexLFRUEviction | _boolean_ | {html}{html} | + | useLock | _boolean_ | {html}{html} | + +{anchor:kahaPersistenceAdapter-element} +h3. The _[|#kahaPersistenceAdapter-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | maxDataFileLength | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | persistentIndex | _boolean_ | {html}{html} | + | size | _java.util.concurrent.atomic.AtomicLong_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + +{anchor:lDAPAuthorizationMap-element} +h3. The _[|#lDAPAuthorizationMap-element]_ Element + {html}An {@link AuthorizationMap} which uses LDAP{html} +h4. 
Properties + || Property Name || Type || Description || + | adminAttribute | _java.lang.String_ | {html}{html} | + | adminBase | _java.lang.String_ | {html}{html} | + | advisorySearchBase | _java.lang.String_ | {html}{html} | + | authentication | _java.lang.String_ | {html}{html} | + | connectionPassword | _java.lang.String_ | {html}{html} | + | connectionProtocol | _java.lang.String_ | {html}{html} | + | connectionURL | _java.lang.String_ | {html}{html} | + | connectionUsername | _java.lang.String_ | {html}{html} | + | context | _javax.naming.directory.DirContext_ | {html}{html} | + | initialContextFactory | _java.lang.String_ | {html}{html} | + | options | _java.util.Map_ | {html}{html} | + | queueSearchMatchingFormat | _java.text.MessageFormat_ | {html}{html} | + | queueSearchSubtreeBool | _boolean_ | {html}{html} | + | readAttribute | _java.lang.String_ | {html}{html} | + | readBase | _java.lang.String_ | {html}{html} | + | tempSearchBase | _java.lang.String_ | {html}{html} | + | topicSearchMatchingFormat | _java.text.MessageFormat_ | {html}{html} | + | topicSearchSubtreeBool | _boolean_ | {html}{html} | + | useAdvisorySearchBase | _boolean_ | {html}{html} | + | writeAttribute | _java.lang.String_ | {html}{html} | + | writeBase | _java.lang.String_ | {html}{html} | + +{anchor:lastImageSubscriptionRecoveryPolicy-element} +h3. The _[|#lastImageSubscriptionRecoveryPolicy-element]_ Element + {html}This implementation of {@link SubscriptionRecoveryPolicy} will only keep the +last message.{html} +h4. Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + +{anchor:ldapNetworkConnector-element} +h3. 
The _[|#ldapNetworkConnector-element]_ Element + {html}class to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.{html} +h4. Properties + || Property Name || Type || Description || + | alwaysSyncSend | _boolean_ | {html}{html} | + | anonymousAuthentication | _boolean_ | {html}sets LDAP anonymous authentication access credentials{html} | + | base | _java.lang.String_ | {html}sets the base LDAP dn used for lookup operations{html} | + | bridgeTempDestinations | _boolean_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | brokerURL | _java.lang.String_ | {html}{html} | + | conduitSubscriptions | _boolean_ | {html}{html} | + | connectionFilter | _org.apache.activemq.network.ConnectionFilter_ | {html}{html} | + | consumerPriorityBase | _int_ | {html}{html} | + | decreaseNetworkConsumerPriority | _boolean_ | {html}{html} | + | destinationFilter | _java.lang.String_ | {html}{html} | + | dispatchAsync | _boolean_ | {html}{html} | + | duplex | _boolean_ | {html}{html} | + | durableDestinations | (_java.lang.Object_)\* | {html}{html} | + | dynamicOnly | _boolean_ | {html}{html} | + | dynamicallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | excludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | localUri | _java.net.URI_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | networkTTL | _int_ | {html}{html} | + | objectName | _javax.management.ObjectName_ | {html}{html} | + | password | _java.lang.String_ | {html}sets the LDAP password for access credentials{html} | + | prefetchSize | _java.lang.String_ | {html}{html} | + | searchEventListener | _boolean_ | {html}enables/disable a persistent 
search to the LDAP server as defined +in draft-ietf-ldapext-psearch-03.txt (2.16.840.1.113730.3.4.3){html} | + | searchFilter | _java.lang.String_ | {html}sets the LDAP search filter as defined in RFC 2254{html} | + | searchScope | _java.lang.String_ | {html}sets the LDAP search scope{html} | + | staticBridge | _boolean_ | {html}{html} | + | staticallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | suppressDuplicateQueueSubscriptions | _boolean_ | {html}{html} | + | suppressDuplicateTopicSubscriptions | _boolean_ | {html}{html} | + | uri | _java.net.URI_ | {html}returns the next URI from the configured list{html} | + | useCompression | _boolean_ | {html}{html} | + | user | _java.lang.String_ | {html}sets the LDAP user for access credentials{html} | + | userName | _java.lang.String_ | {html}{html} | + +{anchor:lease-database-locker-element} +h3. The _[|#lease-database-locker-element]_ Element + {html}Represents an exclusive lease on a database to avoid multiple brokers running +against the same logical database.{html} +h4. Properties + || Property Name || Type || Description || + | failIfLocked | _boolean_ | {html}{html} | + | leaseHolderId | _java.lang.String_ | {html}{html} | + | lockAcquireSleepInterval | _long_ | {html}{html} | + | maxAllowableDiffFromDBTime | _int_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | queryTimeout | _int_ | {html}{html} | + +{anchor:levelDB-element} +h3. The _[|#levelDB-element]_ Element + {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with +LevelDB - Embedded Lightweight Non-Relational Database{html} +h4. 
Properties + || Property Name || Type || Description || + | asyncBufferSize | _int_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | failIfLocked | _boolean_ | {html}{html} | + | flushDelay | _int_ | {html}{html} | + | indexBlockRestartInterval | _int_ | {html}{html} | + | indexBlockSize | _int_ | {html}{html} | + | indexCacheSize | _long_ | {html}{html} | + | indexCompression | _java.lang.String_ | {html}{html} | + | indexFactory | _java.lang.String_ | {html}{html} | + | indexMaxOpenFiles | _int_ | {html}{html} | + | indexWriteBufferSize | _int_ | {html}{html} | + | logCompression | _java.lang.String_ | {html}{html} | + | logDirectory | _java.io.File_ | {html}{html} | + | logSize | _long_ | {html}{html} | + | monitorStats | _boolean_ | {html}{html} | + | paranoidChecks | _boolean_ | {html}{html} | + | sync | _boolean_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | verifyChecksums | _boolean_ | {html}{html} | + +{anchor:loggingBrokerPlugin-element} +h3. The _[|#loggingBrokerPlugin-element]_ Element + {html}A simple Broker interceptor which allows you to enable/disable logging.{html} +h4. 
Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | logAll | _boolean_ | {html}Logs all Events that go through the Plugin{html} | + | logConnectionEvents | _boolean_ | {html}Logs Events that are related to connections{html} | + | logConsumerEvents | _boolean_ | {html}Logs Events that are related to Consumers{html} | + | logInternalEvents | _boolean_ | {html}Logs Events that are normally internal to the broker{html} | + | logMessageEvents | _boolean_ | {html}Logs Events that are related to message processing{html} | + | logProducerEvents | _boolean_ | {html}Logs Events that are related to Producers{html} | + | logSessionEvents | _boolean_ | {html}Logs Events that are related to sessions{html} | + | logTransactionEvents | _boolean_ | {html}Logs Events that are related to transaction processing{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + +{anchor:mKahaDB-element} +h3. The _[|#mKahaDB-element]_ Element + {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports +distribution of destinations across multiple kahaDB persistence adapters{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | entries | (_java.lang.Object_)\* | {html}A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring{html} | + | filteredPersistenceAdapters | (_java.lang.Object_)\* | {html}Sets the FilteredKahaDBPersistenceAdapter entries{html} | + | journalMaxFileLength | _java.lang.String_ | {html}Set the max file length of the transaction journal +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can +be used{html} | + | journalWriteBatchSize | _java.lang.String_ | {html}Set the max write batch size of the transaction journal +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can +be used{html} | + | transactionStore | _org.apache.activemq.store.kahadb.MultiKahaDBTransactionStore_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + +{anchor:managementContext-element} +h3. The _[|#managementContext-element]_ Element + {html}An abstraction over JMX mbean registration{html} +h4. 
Properties + || Property Name || Type || Description || + | MBeanServer | _javax.management.MBeanServer_ | {html}Get the MBeanServer{html} | + | allowRemoteAddressInMBeanNames | _boolean_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}Gets the broker name this context is used by, may be null +if the broker name was not set.{html} | + | connectorHost | _java.lang.String_ | {html}Get the connectorHost{html} | + | connectorPath | _java.lang.String_ | {html}{html} | + | connectorPort | _java.lang.String_ | {html}{html} | + | createConnector | _java.lang.String_ | {html}{html} | + | createMBeanServer | _boolean_ | {html}{html} | + | environment | _java.util.Map_ | {html}{html} | + | findTigerMbeanServer | _boolean_ | {html}Enables/disables the searching for the Java 5 platform MBeanServer{html} | + | jmxDomainName | _java.lang.String_ | {html}{html} | + | rmiServerPort | _java.lang.String_ | {html}{html} | + | server | _javax.management.MBeanServer_ | {html}{html} | + | useMBeanServer | _boolean_ | {html}{html} | + +{anchor:masterConnector-element} +h3. The _[|#masterConnector-element]_ Element + {html}Connects a Slave Broker to a Master when using Master Slave for High +Availability of messages.{html} +h4. Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | failedToStart | _boolean_ | {html}Get the failedToStart{html} | + | localURI | _java.net.URI_ | {html}{html} | + | password | _java.lang.String_ | {html}{html} | + | remoteURI | _java.net.URI_ | {html}{html} | + | remoteUri | _java.lang.String_ | {html}{html} | + | userName | _java.lang.String_ | {html}{html} | + +{anchor:maxdb-jdbc-adapter-element} +h3. The _[|#maxdb-jdbc-adapter-element]_ Element + {html}JDBC Adapter for the MaxDB database.{html} +h4. 
Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:memoryPersistenceAdapter-element} +h3. The _[|#memoryPersistenceAdapter-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | brokerName | _java.lang.String_ | {html}{html} | + | createTransactionStore | _boolean_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | usageManager | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:memoryUsage-element} +h3. The _[|#memoryUsage-element]_ Element + {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} +h4. Properties + || Property Name || Type || Description || + | executor | _java.util.concurrent.ThreadPoolExecutor_ | {html}{html} | + | limit | _java.lang.String_ | {html}Sets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. 
+When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | limiter | _[org.apache.activemq.usage.UsageCapacity|#org.apache.activemq.usage.UsageCapacity-types]_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | parent | _T_ | {html}{html} | + | percentUsage | _int_ | {html}{html} | + | percentUsageMinDelta | _java.lang.String_ | {html}Sets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.{html} | + | pollingTime | _int_ | {html}{html} | + | portion | _float_ | {html}{html} | + | usage | _long_ | {html}{html} | + | usagePortion | _float_ | {html}{html} | + +{anchor:messageGroupHashBucketFactory-element} +h3. The _[|#messageGroupHashBucketFactory-element]_ Element + {html}A factory to create instances of {@link SimpleMessageGroupMap} when +implementing the Message +Groups functionality.{html} +h4. Properties + || Property Name || Type || Description || + | bucketCount | _int_ | {html}Sets the number of hash buckets to use for the message group +functionality. This is only applicable to using message groups to +parallelize processing of a queue while preserving order across an +individual JMSXGroupID header value. This value sets the number of hash +buckets that will be used (i.e. the maximum possible concurrency).{html} | + +{anchor:mirroredQueue-element} +h3. The _[|#mirroredQueue-element]_ Element + {html}Creates Mirrored +Queue using a prefix and postfix to define the topic name on which to mirror the queue to.{html} +h4. Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | copyMessage | _boolean_ | {html}Sets whether a copy of the message will be sent to each destination. 
+Defaults to true so that the +destination of the message{html} | + | postfix | _java.lang.String_ | {html}Sets any postfix used to identify the queue consumers{html} | + | prefix | _java.lang.String_ | {html}Sets the prefix wildcard used to identify the queue consumers for a given +topic{html} | + +{anchor:multicastNetworkConnector-element} +h3. The _[|#multicastNetworkConnector-element]_ Element + {html}A network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. + +This implementation does not depend on multicast at all; any other group +based transport could be used.{html} +h4. Properties + || Property Name || Type || Description || + | alwaysSyncSend | _boolean_ | {html}{html} | + | bridge | _[org.apache.activemq.network.DemandForwardingBridgeSupport|#org.apache.activemq.network.DemandForwardingBridgeSupport-types]_ | {html}{html} | + | bridgeTempDestinations | _boolean_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | brokerURL | _java.lang.String_ | {html}{html} | + | conduitSubscriptions | _boolean_ | {html}{html} | + | connectionFilter | _org.apache.activemq.network.ConnectionFilter_ | {html}{html} | + | consumerPriorityBase | _int_ | {html}{html} | + | decreaseNetworkConsumerPriority | _boolean_ | {html}{html} | + | destinationFilter | _java.lang.String_ | {html}{html} | + | dispatchAsync | _boolean_ | {html}{html} | + | duplex | _boolean_ | {html}{html} | + | durableDestinations | (_java.lang.Object_)\* | {html}{html} | + | dynamicOnly | _boolean_ | {html}{html} | + | dynamicallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | excludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | localTransport | 
_org.apache.activemq.transport.Transport_ | {html}{html} | + | localUri | _java.net.URI_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | networkTTL | _int_ | {html}{html} | + | objectName | _javax.management.ObjectName_ | {html}{html} | + | password | _java.lang.String_ | {html}{html} | + | prefetchSize | _java.lang.String_ | {html}{html} | + | remoteTransport | _org.apache.activemq.transport.Transport_ | {html}Sets the remote transport implementation{html} | + | remoteURI | _java.net.URI_ | {html}Sets the remote transport URI to some group transport like +multicast://address:port{html} | + | staticBridge | _boolean_ | {html}{html} | + | staticallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | suppressDuplicateQueueSubscriptions | _boolean_ | {html}{html} | + | suppressDuplicateTopicSubscriptions | _boolean_ | {html}{html} | + | useCompression | _boolean_ | {html}{html} | + | userName | _java.lang.String_ | {html}{html} | + +{anchor:multicastTraceBrokerPlugin-element} +h3. The _[|#multicastTraceBrokerPlugin-element]_ Element + {html}A Broker interceptor which allows you to trace all operations to a Multicast +socket.{html} +h4. Properties + || Property Name || Type || Description || + | address | _java.net.SocketAddress_ | {html}{html} | + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | broadcast | _boolean_ | {html}{html} | + | destination | _java.net.URI_ | {html}{html} | + | maxTraceDatagramSize | _int_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | timeToLive | _int_ | {html}{html} | + | wireFormat | _org.apache.activemq.wireformat.WireFormat_ | {html}{html} | + | wireFormatFactory | _org.apache.activemq.wireformat.WireFormatFactory_ | {html}{html} | + +{anchor:mysql-jdbc-adapter-element} +h3. The _[|#mysql-jdbc-adapter-element]_ Element + {html}{html} +h4. 
Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | engineType | _java.lang.String_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | typeStatement | _java.lang.String_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:networkConnector-element} +h3. The _[|#networkConnector-element]_ Element + {html}A network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote broker{html} +h4. Properties + || Property Name || Type || Description || + | alwaysSyncSend | _boolean_ | {html}{html} | + | bridgeTempDestinations | _boolean_ | {html}{html} | + | brokerName | _java.lang.String_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | brokerURL | _java.lang.String_ | {html}{html} | + | conduitSubscriptions | _boolean_ | {html}{html} | + | connectionFilter | _org.apache.activemq.network.ConnectionFilter_ | {html}{html} | + | consumerPriorityBase | _int_ | {html}{html} | + | decreaseNetworkConsumerPriority | _boolean_ | {html}{html} | + | destinationFilter | _java.lang.String_ | {html}{html} | + | discoveryAgent | _org.apache.activemq.transport.discovery.DiscoveryAgent_ | {html}{html} | + | discoveryURI | _java.net.URI_ | {html}{html} | + | dispatchAsync | _boolean_ | {html}{html} | + | duplex | _boolean_ | {html}{html} | + | durableDestinations | (_java.lang.Object_)\* | {html}{html} | + | dynamicOnly | _boolean_ | {html}{html} | + | dynamicallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | excludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | localUri | _java.net.URI_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | networkTTL | 
_int_ | {html}{html} | + | objectName | _javax.management.ObjectName_ | {html}{html} | + | password | _java.lang.String_ | {html}{html} | + | prefetchSize | _java.lang.String_ | {html}{html} | + | staticBridge | _boolean_ | {html}{html} | + | staticallyIncludedDestinations | (_java.lang.Object_)\* | {html}{html} | + | suppressDuplicateQueueSubscriptions | _boolean_ | {html}{html} | + | suppressDuplicateTopicSubscriptions | _boolean_ | {html}{html} | + | uri | _java.net.URI_ | {html}{html} | + | useCompression | _boolean_ | {html}{html} | + | userName | _java.lang.String_ | {html}{html} | + +{anchor:noSubscriptionRecoveryPolicy-element} +h3. The _[|#noSubscriptionRecoveryPolicy-element]_ Element + {html}This SubscriptionRecoveryPolicy disable recovery of messages.{html} +h4. Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + +{anchor:oldestMessageEvictionStrategy-element} +h3. The _[|#oldestMessageEvictionStrategy-element]_ Element + {html}An eviction strategy which evicts the oldest message first (which is the +default).{html} +h4. Properties + || Property Name || Type || Description || + | evictExpiredMessagesHighWatermark | _int_ | {html}Sets the high water mark on which we will eagerly evict expired messages from RAM{html} | + +{anchor:oldestMessageWithLowestPriorityEvictionStrategy-element} +h3. The _[|#oldestMessageWithLowestPriorityEvictionStrategy-element]_ Element + {html}An eviction strategy which evicts the oldest message with the lowest priority first.{html} +h4. Properties + || Property Name || Type || Description || + | evictExpiredMessagesHighWatermark | _int_ | {html}Sets the high water mark on which we will eagerly evict expired messages from RAM{html} | + +{anchor:oracleBlobJDBCAdapter-element} +h3. 
The _[|#oracleBlobJDBCAdapter-element]_ Element + {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:oracleJDBCAdapter-element} +h3. The _[|#oracleJDBCAdapter-element]_ Element + {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:outboundQueueBridge-element} +h3. The _[|#outboundQueueBridge-element]_ Element + {html}Create an Outbound Queue Bridge. By default the bridge uses the same +name for both the inbound and outbound queues, however this can be altered +by using the public setter methods to configure both inbound and outbound +queue names.{html} +h4. Properties + || Property Name || Type || Description || + | consumer | _javax.jms.MessageConsumer_ | {html}{html} | + | consumerConnection | _javax.jms.QueueConnection_ | {html}{html} | + | consumerQueue | _[javax.jms.Queue|#javax.jms.Queue-types]_ | {html}{html} | + | doHandleReplyTo | _boolean_ | {html}{html} | + | jmsConnector | _[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_ | {html}{html} | + | jmsMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | localQueueName | _java.lang.String_ | {html}{html} | + | outboundQueueName | _java.lang.String_ | {html}Sets the name of the outbound queue name. If the inbound queue name +has not been set already then this method uses the provided queue name +to set the inbound topic name as well.{html} | + | producerConnection | _javax.jms.QueueConnection_ | {html}{html} | + | producerQueue | _[javax.jms.Queue|#javax.jms.Queue-types]_ | {html}{html} | + | selector | _java.lang.String_ | {html}{html} | + +{anchor:outboundTopicBridge-element} +h3. The _[|#outboundTopicBridge-element]_ Element + {html}Create an Outbound Topic Bridge. 
By default the bridge uses the same +name for both the inbound and outbound topics, however this can be altered +by using the public setter methods to configure both inbound and outbound +topic names.{html} +h4. Properties + || Property Name || Type || Description || + | consumer | _javax.jms.MessageConsumer_ | {html}{html} | + | consumerConnection | _javax.jms.TopicConnection_ | {html}{html} | + | consumerName | _java.lang.String_ | {html}{html} | + | consumerTopic | _[javax.jms.Topic|#javax.jms.Topic-types]_ | {html}{html} | + | doHandleReplyTo | _boolean_ | {html}{html} | + | jmsConnector | _[org.apache.activemq.network.jms.JmsConnector|#org.apache.activemq.network.jms.JmsConnector-types]_ | {html}{html} | + | jmsMessageConvertor | _[org.apache.activemq.network.jms.JmsMesageConvertor|#org.apache.activemq.network.jms.JmsMesageConvertor-types]_ | {html}{html} | + | localTopicName | _java.lang.String_ | {html}{html} | + | outboundTopicName | _java.lang.String_ | {html}Sets the name of the outbound topic name. If the inbound topic name +has not been set already then this method uses the provided topic name +to set the inbound topic name as well.{html} | + | producerConnection | _javax.jms.TopicConnection_ | {html}{html} | + | producerTopic | _[javax.jms.Topic|#javax.jms.Topic-types]_ | {html}{html} | + | selector | _java.lang.String_ | {html}{html} | + +{anchor:pListStore-element} +h3. The _[|#pListStore-element]_ Element + {html}{html} +h4. 
Properties + || Property Name || Type || Description || + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}{html} | + | cleanupInterval | _long_ | {html}{html} | + | directory | _java.io.File_ | {html}{html} | + | enableIndexWriteAsync | _boolean_ | {html}{html} | + | failIfDatabaseIsLocked | _boolean_ | {html}{html} | + | indexCacheSize | _int_ | {html}{html} | + | indexEnablePageCaching | _boolean_ | {html}{html} | + | indexPageSize | _int_ | {html}{html} | + | indexWriteBatchSize | _int_ | {html}{html} | + | journalMaxFileLength | _int_ | {html}{html} | + | journalMaxWriteBatchSize | _int_ | {html}{html} | + | lazyInit | _boolean_ | {html}{html} | + +{anchor:policyEntry-element} +h3. The _[|#policyEntry-element]_ Element + {html}Represents an entry in a {@link PolicyMap} for assigning policies to a +specific destination or a hierarchical wildcard area of destinations.{html} +h4. Properties + || Property Name || Type || Description || + | advisoryForConsumed | _boolean_ | {html}{html} | + | advisoryForDelivery | _boolean_ | {html}{html} | + | advisoryForDiscardingMessages | _boolean_ | {html}{html} | + | advisoryForFastProducers | _boolean_ | {html}{html} | + | advisoryForSlowConsumers | _boolean_ | {html}{html} | + | advisoryWhenFull | _boolean_ | {html}{html} | + | allConsumersExclusiveByDefault | _boolean_ | {html}{html} | + | alwaysRetroactive | _boolean_ | {html}{html} | + | blockedProducerWarningInterval | _long_ | {html}Set's the interval at which warnings about producers being blocked by +resource usage will be triggered. 
Values of 0 or less will disable +warnings{html} | + | consumersBeforeDispatchStarts | _int_ | {html}{html} | + | cursorMemoryHighWaterMark | _int_ | {html}{html} | + | deadLetterStrategy | _[org.apache.activemq.broker.region.policy.DeadLetterStrategy|#org.apache.activemq.broker.region.policy.DeadLetterStrategy-types]_ | {html}Sets the policy used to determine which dead letter queue destination +should be used{html} | + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | dispatchPolicy | _[org.apache.activemq.broker.region.policy.DispatchPolicy|#org.apache.activemq.broker.region.policy.DispatchPolicy-types]_ | {html}{html} | + | doOptimzeMessageStorage | _boolean_ | {html}{html} | + | durableTopicPrefetch | _int_ | {html}Get the durableTopicPrefetch{html} | + | enableAudit | _boolean_ | {html}{html} | + | expireMessagesPeriod | _long_ | {html}{html} | + | gcInactiveDestinations | _boolean_ | {html}{html} | + | gcWithNetworkConsumers | _boolean_ | {html}{html} | + | inactiveTimoutBeforeGC | _long_ | {html}{html} | + | lazyDispatch | _boolean_ | {html}{html} | + | maxAuditDepth | _int_ | {html}{html} | + | maxBrowsePageSize | _int_ | {html}{html} | + | maxExpirePageSize | _int_ | {html}{html} | + | maxPageSize | _int_ | {html}{html} | + | maxProducersToAudit | _int_ | {html}{html} | + | maxQueueAuditDepth | _int_ | {html}{html} | + | memoryLimit | _java.lang.String_ | {html}When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | messageEvictionStrategy | _[org.apache.activemq.broker.region.policy.MessageEvictionStrategy|#org.apache.activemq.broker.region.policy.MessageEvictionStrategy-types]_ | {html}Sets the eviction strategy used to decide which message to evict when the +slow consumer needs to discard messages{html} | + | messageGroupMapFactory | 
_[org.apache.activemq.broker.region.group.MessageGroupMapFactory|#org.apache.activemq.broker.region.group.MessageGroupMapFactory-types]_ | {html}Sets the factory used to create new instances of {MessageGroupMap} used +to implement the Message Groups +functionality.{html} | + | minimumMessageSize | _long_ | {html}{html} | + | networkBridgeFilterFactory | _[org.apache.activemq.network.NetworkBridgeFilterFactory|#org.apache.activemq.network.NetworkBridgeFilterFactory-types]_ | {html}{html} | + | optimizeMessageStoreInFlightLimit | _int_ | {html}{html} | + | optimizedDispatch | _boolean_ | {html}{html} | + | pendingDurableSubscriberPolicy | _[org.apache.activemq.broker.region.policy.PendingDurableSubscriberMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingDurableSubscriberMessageStoragePolicy-types]_ | {html}{html} | + | pendingMessageLimitStrategy | _[org.apache.activemq.broker.region.policy.PendingMessageLimitStrategy|#org.apache.activemq.broker.region.policy.PendingMessageLimitStrategy-types]_ | {html}Sets the strategy to calculate the maximum number of messages that are +allowed to be pending on consumers (in addition to their prefetch sizes). +Once the limit is reached, non-durable topics can then start discarding +old messages. 
This allows us to keep dispatching messages to slow +consumers while not blocking fast consumers and discarding the messages +oldest first.{html} | + | pendingQueuePolicy | _[org.apache.activemq.broker.region.policy.PendingQueueMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingQueueMessageStoragePolicy-types]_ | {html}{html} | + | pendingSubscriberPolicy | _[org.apache.activemq.broker.region.policy.PendingSubscriberMessageStoragePolicy|#org.apache.activemq.broker.region.policy.PendingSubscriberMessageStoragePolicy-types]_ | {html}{html} | + | prioritizedMessages | _boolean_ | {html}{html} | + | producerFlowControl | _boolean_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | queueBrowserPrefetch | _int_ | {html}Get the queueBrowserPrefetch{html} | + | queuePrefetch | _int_ | {html}Get the queuePrefetch{html} | + | reduceMemoryFootprint | _boolean_ | {html}{html} | + | sendAdvisoryIfNoConsumers | _boolean_ | {html}Sends an advisory message if a non-persistent message is sent and there +are no active consumers{html} | + | slowConsumerStrategy | _[org.apache.activemq.broker.region.policy.SlowConsumerStrategy|#org.apache.activemq.broker.region.policy.SlowConsumerStrategy-types]_ | {html}{html} | + | storeUsageHighWaterMark | _int_ | {html}{html} | + | strictOrderDispatch | _boolean_ | {html}{html} | + | subscriptionRecoveryPolicy | _[org.apache.activemq.broker.region.policy.SubscriptionRecoveryPolicy|#org.apache.activemq.broker.region.policy.SubscriptionRecoveryPolicy-types]_ | {html}{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | timeBeforeDispatchStarts | _int_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | topicPrefetch | _int_ | {html}Get the topicPrefetch{html} | + | useCache | _boolean_ | {html}{html} | + | useConsumerPriority 
| _boolean_ | {html}{html} | + | usePrefetchExtension | _boolean_ | {html}{html} | + +{anchor:policyMap-element} +h3. The _[|#policyMap-element]_ Element + {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} +h4. Properties + || Property Name || Type || Description || + | defaultEntry | _[org.apache.activemq.broker.region.policy.PolicyEntry|#org.apache.activemq.broker.region.policy.PolicyEntry-types]_ | {html}{html} | + | entries | (_java.lang.Object_)\* | {html}A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring{html} | + | policyEntries | (_java.lang.Object_)\* | {html}Sets the individual entries on the policy map{html} | + +{anchor:postgresql-jdbc-adapter-element} +h3. The _[|#postgresql-jdbc-adapter-element]_ Element + {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} +h4. Properties + || Property Name || Type || Description || + | acksPkName | _java.lang.String_ | {html}{html} | + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:prefetchPolicy-element} +h3. The _[|#prefetchPolicy-element]_ Element + {html}Defines the prefetch message policies for different types of consumers{html} +h4. Properties + || Property Name || Type || Description || + | all | _int_ | {html}{html} | + | durableTopicPrefetch | _int_ | {html}{html} | + | inputStreamPrefetch | _int_ | {html}{html} | + | maximumPendingMessageLimit | _int_ | {html}Sets how many messages a broker will keep around, above the prefetch +limit, for non-durable topics before starting to discard older messages.{html} | + | optimizeDurableTopicPrefetch | _int_ | {html}{html} | + | queueBrowserPrefetch | _int_ | {html}{html} | + | queuePrefetch | _int_ | {html}{html} | + | topicPrefetch | _int_ | {html}{html} | + +{anchor:prefetchRatePendingMessageLimitStrategy-element} +h3. The _[|#prefetchRatePendingMessageLimitStrategy-element]_ Element + {html}This PendingMessageLimitStrategy sets the maximum pending message limit value to be +a multiplier of the prefetch limit of the subscription.{html} +h4. Properties + || Property Name || Type || Description || + | multiplier | _double_ | {html}Sets the multiplier of the prefetch size which will be used to define the maximum number of pending +messages for non-durable topics before messages are discarded.{html} | + +{anchor:priorityNetworkDispatchPolicy-element} +h3. 
The _[|#priorityNetworkDispatchPolicy-element]_ Element + {html}dispatch policy that ignores lower priority duplicate network consumers, +used in conjunction with network bridge suppresDuplicateTopicSubscriptions{html} + +{anchor:proxyConnector-element} +h3. The _[|#proxyConnector-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | bind | _java.net.URI_ | {html}{html} | + | localUri | _java.net.URI_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | proxyToLocalBroker | _boolean_ | {html}{html} | + | remote | _java.net.URI_ | {html}{html} | + | server | _org.apache.activemq.transport.TransportServer_ | {html}{html} | + +{anchor:queryBasedSubscriptionRecoveryPolicy-element} +h3. The _[|#queryBasedSubscriptionRecoveryPolicy-element]_ Element + {html}This implementation of {@link SubscriptionRecoveryPolicy} will perform a user +specific query mechanism to load any messages they may have missed.{html} +h4. Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | query | _org.apache.activemq.broker.region.policy.MessageQuery_ | {html}Sets the query strategy to load initial messages{html} | + +{anchor:queue-element} +h3. The _[|#queue-element]_ Element + {html}An ActiveMQ Queue{html} +h4. Properties + || Property Name || Type || Description || + | compositeDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | physicalName | _java.lang.String_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + +{anchor:queueDispatchSelector-element} +h3. The _[|#queueDispatchSelector-element]_ Element + {html}Queue dispatch policy that determines if a message can be sent to a subscription{html} +h4. 
Properties + || Property Name || Type || Description || + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | exclusiveConsumer | _org.apache.activemq.broker.region.Subscription_ | {html}{html} | + +{anchor:reconnectionPolicy-element} +h3. The _[|#reconnectionPolicy-element]_ Element + {html}A policy object that defines how a {@link JmsConnector} deals with +reconnection of the local and foreign connections.{html} +h4. Properties + || Property Name || Type || Description || + | backOffMultiplier | _double_ | {html}Gets the multiplier used to grow the delay between connection attempts from the initial +time to the max set time. By default this value is set to 2.0.{html} | + | initialReconnectDelay | _long_ | {html}Gets the initial delay value used before a reconnection attempt is made. If the +use exponential back-off value is set to false then this will be the fixed time +between connection attempts. By default this value is set to one second.{html} | + | maxInitialConnectAttempts | _int_ | {html}Gets the maximum number of times that the {@link JmsConnector} will try +to connect on startup before it marks itself as failed and does not +try any further connections.{html} | + | maxReconnectAttempts | _int_ | {html}Gets the number of times that {@link JmsConnector} will attempt to connect +or reconnect before giving up. By default the policy sets this value to +a negative value meaning try forever.{html} | + | maxSendRetries | _int_ | {html}Gets the maximum number of times a Message send should be retried before +a JMSException is thrown indicating that the operation failed.{html} | + | maximumReconnectDelay | _long_ | {html}Gets the maximum delay that is inserted between each attempt to connect +before another attempt is made. 
The default setting for this value is +30 seconds.{html} | + | sendRetyDelay | _long_ | {html}Set the amount of time the DestinationBridge will wait between attempts +to forward a message. The default policy limits the minimum time between +send attempts to one second.{html} | + | useExponentialBackOff | _boolean_ | {html}Gets whether the policy uses the set back-off multiplier to grow the time between +connection attempts.{html} | + +{anchor:redeliveryPlugin-element} +h3. The _[|#redeliveryPlugin-element]_ Element + {html}Replace regular DLQ handling with redelivery via a resend to the original destination +after a delay +A destination matching RedeliveryPolicy controls the quantity and delay for re-sends +If there is no matching policy or an existing policy limit is exceeded by default +regular DLQ processing resumes. This is controlled via sendToDlqIfMaxRetriesExceeded +and fallbackToDeadLetter{html} +h4. Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | fallbackToDeadLetter | _boolean_ | {html}What to do if there is no matching redelivery policy for a destination. +when true, the region broker DLQ processing will be used via sendToDeadLetterQueue +when false, there is no action{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | redeliveryPolicyMap | _[org.apache.activemq.broker.region.policy.RedeliveryPolicyMap|#org.apache.activemq.broker.region.policy.RedeliveryPolicyMap-types]_ | {html}{html} | + | sendToDlqIfMaxRetriesExceeded | _boolean_ | {html}What to do if the maxretries on a matching redelivery policy is exceeded. +when true, the region broker DLQ processing will be used via sendToDeadLetterQueue +when false, there is no action{html} | + +{anchor:redeliveryPolicy-element} +h3. 
The _[|#redeliveryPolicy-element]_ Element + {html}Configuration options for a messageConsumer used to control how messages are re-delivered when they +are rolled back. +May be used server side on a per destination basis via the Broker RedeliveryPlugin{html} +h4. Properties + || Property Name || Type || Description || + | backOffMultiplier | _double_ | {html}{html} | + | collisionAvoidancePercent | _short_ | {html}{html} | + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | initialRedeliveryDelay | _long_ | {html}{html} | + | maximumRedeliveries | _int_ | {html}{html} | + | maximumRedeliveryDelay | _long_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | redeliveryDelay | _long_ | {html}{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | useCollisionAvoidance | _boolean_ | {html}{html} | + | useExponentialBackOff | _boolean_ | {html}{html} | + +{anchor:redeliveryPolicyMap-element} +h3. The _[|#redeliveryPolicyMap-element]_ Element + {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} +h4. Properties + || Property Name || Type || Description || + | defaultEntry | _[org.apache.activemq.RedeliveryPolicy|#org.apache.activemq.RedeliveryPolicy-types]_ | {html}{html} | + | entries | (_java.lang.Object_)\* | {html}A helper method to allow the destination map to be populated from a +dependency injection framework such as Spring{html} | + | redeliveryPolicyEntries | (_java.lang.Object_)\* | {html}Sets the individual entries on the redeliveryPolicyMap{html} | + +{anchor:roundRobinDispatchPolicy-element} +h3. 
The _[|#roundRobinDispatchPolicy-element]_ Element + {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} + +{anchor:shared-file-locker-element} +h3. The _[|#shared-file-locker-element]_ Element + {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} +h4. Properties + || Property Name || Type || Description || + | directory | _java.io.File_ | {html}{html} | + | failIfLocked | _boolean_ | {html}{html} | + | lockAcquireSleepInterval | _long_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + +{anchor:sharedDeadLetterStrategy-element} +h3. The _[|#sharedDeadLetterStrategy-element]_ Element + {html}A default implementation of {@link DeadLetterStrategy} which uses +a constant destination.{html} +h4. Properties + || Property Name || Type || Description || + | deadLetterQueue | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | enableAudit | _boolean_ | {html}{html} | + | processExpired | _boolean_ | {html}{html} | + | processNonPersistent | _boolean_ | {html}{html} | + +{anchor:simpleAuthenticationPlugin-element} +h3. The _[|#simpleAuthenticationPlugin-element]_ Element + {html}Provides a simple authentication plugin{html} +h4. Properties + || Property Name || Type || Description || + | anonymousAccessAllowed | _boolean_ | {html}{html} | + | anonymousGroup | _java.lang.String_ | {html}{html} | + | anonymousUser | _java.lang.String_ | {html}{html} | + | userGroups | _java.util.Map_ | {html}Sets the groups a user is in. The key is the user name and the value is a +Set of groups{html} | + | userPasswords | _java.util.Map_ | {html}Sets the map indexed by user name with the value the password{html} | + | users | (_java.lang.Object_)\* | {html}Sets individual users for authentication{html} | + +{anchor:simpleAuthorizationMap-element} +h3. 
The _[|#simpleAuthorizationMap-element]_ Element + {html}An AuthorizationMap which is configured with individual DestinationMaps for +each operation.{html} +h4. Properties + || Property Name || Type || Description || + | adminACLs | _[org.apache.activemq.filter.DestinationMap|#org.apache.activemq.filter.DestinationMap-types]_ | {html}{html} | + | readACLs | _[org.apache.activemq.filter.DestinationMap|#org.apache.activemq.filter.DestinationMap-types]_ | {html}{html} | + | tempDestinationAuthorizationEntry | _[org.apache.activemq.security.TempDestinationAuthorizationEntry|#org.apache.activemq.security.TempDestinationAuthorizationEntry-types]_ | {html}{html} | + | writeACLs | _[org.apache.activemq.filter.DestinationMap|#org.apache.activemq.filter.DestinationMap-types]_ | {html}{html} | + +{anchor:simpleDispatchPolicy-element} +h3. The _[|#simpleDispatchPolicy-element]_ Element + {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} + +{anchor:simpleDispatchSelector-element} +h3. The _[|#simpleDispatchSelector-element]_ Element + {html}Simple dispatch policy that determines if a message can be sent to a subscription{html} +h4. Properties + || Property Name || Type || Description || + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + +{anchor:simpleJmsMessageConvertor-element} +h3. The _[|#simpleJmsMessageConvertor-element]_ Element + {html}Converts Message from one JMS to another{html} +h4. Properties + || Property Name || Type || Description || + | connection | _javax.jms.Connection_ | {html}{html} | + +{anchor:simpleMessageGroupMapFactory-element} +h3. The _[|#simpleMessageGroupMapFactory-element]_ Element + {html}A factory to create instances of {@link SimpleMessageGroupMap} when implementing the +Message Groups functionality.{html} + +{anchor:sslContext-element} +h3. 
The _[|#sslContext-element]_ Element + {html}Extends the SslContext so that it's easier to configure from spring.{html} +h4. Properties + || Property Name || Type || Description || + | SSLContext | _javax.net.ssl.SSLContext_ | {html}{html} | + | keyManagers | (_java.lang.Object_)\* | {html}{html} | + | keyStore | _java.lang.String_ | {html}{html} | + | keyStoreAlgorithm | _java.lang.String_ | {html}{html} | + | keyStoreKeyPassword | _java.lang.String_ | {html}{html} | + | keyStorePassword | _java.lang.String_ | {html}{html} | + | keyStoreType | _java.lang.String_ | {html}{html} | + | protocol | _java.lang.String_ | {html}{html} | + | provider | _java.lang.String_ | {html}{html} | + | secureRandom | _java.security.SecureRandom_ | {html}{html} | + | secureRandomAlgorithm | _java.lang.String_ | {html}{html} | + | trustManagers | (_java.lang.Object_)\* | {html}{html} | + | trustStore | _java.lang.String_ | {html}{html} | + | trustStoreAlgorithm | _java.lang.String_ | {html}{html} | + | trustStorePassword | _java.lang.String_ | {html}{html} | + | trustStoreType | _java.lang.String_ | {html}{html} | + +{anchor:statements-element} +h3. The _[|#statements-element]_ Element + {html}{html} +h4. 
Properties + || Property Name || Type || Description || + | addMessageStatement | _java.lang.String_ | {html}{html} | + | binaryDataType | _java.lang.String_ | {html}{html} | + | clearDurableLastAckInTxStatement | _java.lang.String_ | {html}{html} | + | clearXidFlagStatement | _java.lang.String_ | {html}{html} | + | containerNameDataType | _java.lang.String_ | {html}{html} | + | createDurableSubStatement | _java.lang.String_ | {html}{html} | + | createSchemaStatements | (_java.lang.String_)\* | {html}{html} | + | currentDateTimeStatement | _java.lang.String_ | {html}{html} | + | deleteOldMessagesStatementWithPriority | _java.lang.String_ | {html}{html} | + | deleteSubscriptionStatement | _java.lang.String_ | {html}{html} | + | destinationMessageCountStatement | _java.lang.String_ | {html}{html} | + | dropAckPKAlterStatementEnd | _java.lang.String_ | {html}{html} | + | dropSchemaStatements | (_java.lang.String_)\* | {html}{html} | + | durableSubAcksTableName | _java.lang.String_ | {html}{html} | + | durableSubscriberMessageCountStatement | _java.lang.String_ | {html}{html} | + | durableSubscriberMessageCountStatementWithPriority | _java.lang.String_ | {html}{html} | + | findAcksPendingOutcomeStatement | _java.lang.String_ | {html}{html} | + | findAllDestinationsStatement | _java.lang.String_ | {html}{html} | + | findAllDurableSubMessagesStatement | _java.lang.String_ | {html}{html} | + | findAllDurableSubsStatement | _java.lang.String_ | {html}{html} | + | findAllMessagesStatement | _java.lang.String_ | {html}{html} | + | findDurableSubMessagesStatement | _java.lang.String_ | {html}{html} | + | findDurableSubStatement | _java.lang.String_ | {html}{html} | + | findLastSequenceIdInAcksStatement | _java.lang.String_ | {html}{html} | + | findLastSequenceIdInMsgsStatement | _java.lang.String_ | {html}{html} | + | findMessageByIdStatement | _java.lang.String_ | {html}{html} | + | findMessageSequenceIdStatement | _java.lang.String_ | {html}{html} | + | findMessageStatement 
| _java.lang.String_ | {html}{html} | + | findNextMessagesStatement | _java.lang.String_ | {html}{html} | + | findOpsPendingOutcomeStatement | _java.lang.String_ | {html}{html} | + | findXidByIdStatement | _java.lang.String_ | {html}{html} | + | insertDurablePriorityAckStatement | _java.lang.String_ | {html}{html} | + | lastAckedDurableSubscriberMessageStatement | _java.lang.String_ | {html}{html} | + | lastProducerSequenceIdStatement | _java.lang.String_ | {html}{html} | + | leaseObtainStatement | _java.lang.String_ | {html}{html} | + | leaseOwnerStatement | _java.lang.String_ | {html}{html} | + | leaseUpdateStatement | _java.lang.String_ | {html}{html} | + | lockCreateStatement | _java.lang.String_ | {html}{html} | + | lockTableName | _java.lang.String_ | {html}{html} | + | lockUpdateStatement | _java.lang.String_ | {html}{html} | + | longDataType | _java.lang.String_ | {html}{html} | + | messageTableName | _java.lang.String_ | {html}{html} | + | msgIdDataType | _java.lang.String_ | {html}{html} | + | nextDurableSubscriberMessageStatement | _java.lang.String_ | {html}{html} | + | removeAllMessagesStatement | _java.lang.String_ | {html}{html} | + | removeAllSubscriptionsStatement | _java.lang.String_ | {html}{html} | + | removeMessageStatment | _java.lang.String_ | {html}{html} | + | selectDurablePriorityAckStatement | _java.lang.String_ | {html}{html} | + | sequenceDataType | _java.lang.String_ | {html}{html} | + | stringIdDataType | _java.lang.String_ | {html}{html} | + | tablePrefix | _java.lang.String_ | {html}{html} | + | updateDurableLastAckInTxStatement | _java.lang.String_ | {html}{html} | + | updateDurableLastAckStatement | _java.lang.String_ | {html}{html} | + | updateDurableLastAckWithPriorityInTxStatement | _java.lang.String_ | {html}{html} | + | updateDurableLastAckWithPriorityStatement | _java.lang.String_ | {html}{html} | + | updateLastPriorityAckRowOfDurableSubStatement | _java.lang.String_ | {html}{html} | + | updateMessageStatement | 
_java.lang.String_ | {html}{html} | + | updateXidFlagStatement | _java.lang.String_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + | useLockCreateWhereClause | _boolean_ | {html}{html} | + +{anchor:statisticsBrokerPlugin-element} +h3. The _[|#statisticsBrokerPlugin-element]_ Element + {html}A StatisticsBrokerPlugin +You can retrieve a Map Message for a Destination - or +Broker containing statistics as key-value pairs The message must contain a +replyTo Destination - else its ignored +To retrieve stats on the broker send a empty message to ActiveMQ.Statistics.Broker (Queue or Topic) +With a replyTo set to the destination you want the stats returned to. +To retrieve stats for a destination - e.g. foo - send an empty message to ActiveMQ.Statistics.Destination.foo +- this works with wildcards to - you get a message for each wildcard match on the replyTo destination. +The stats message is a MapMessage populated with statistics for the target{html} + +{anchor:storeCursor-element} +h3. The _[|#storeCursor-element]_ Element + {html}Pending messages{html} + +{anchor:storeDurableSubscriberCursor-element} +h3. The _[|#storeDurableSubscriberCursor-element]_ Element + {html}Pending messages for a durable{html} +h4. Properties + || Property Name || Type || Description || + | immediatePriorityDispatch | _boolean_ | {html}Ensure that new higher priority messages will get an immediate dispatch +rather than wait for the end of the current cursor batch. +Useful when there is a large message backlog and intermittent high priority messages.{html} | + | useCache | _boolean_ | {html}{html} | + +{anchor:storeUsage-element} +h3. The _[|#storeUsage-element]_ Element + {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} +h4. 
Properties + || Property Name || Type || Description || + | executor | _java.util.concurrent.ThreadPoolExecutor_ | {html}{html} | + | limit | _java.lang.String_ | {html}Sets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | limiter | _[org.apache.activemq.usage.UsageCapacity|#org.apache.activemq.usage.UsageCapacity-types]_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | parent | _T_ | {html}{html} | + | percentUsage | _int_ | {html}{html} | + | percentUsageMinDelta | _java.lang.String_ | {html}Sets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.{html} | + | pollingTime | _int_ | {html}{html} | + | store | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}{html} | + | usagePortion | _float_ | {html}{html} | + +{anchor:streamJDBCAdapter-element} +h3. The _[|#streamJDBCAdapter-element]_ Element + {html}This JDBCAdapter inserts and extracts BLOB data using the +setBinaryStream()/getBinaryStream() operations. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Axion
    • +
    {html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:strictOrderDispatchPolicy-element} +h3. The _[|#strictOrderDispatchPolicy-element]_ Element + {html}Dispatch policy that causes every subscription to see messages in the same +order.{html} + +{anchor:sybase-jdbc-adapter-element} +h3. The _[|#sybase-jdbc-adapter-element]_ Element + {html}A JDBC Adapter for Sybase databases{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:systemUsage-element} +h3. The _[|#systemUsage-element]_ Element + {html}Holder for Usage instances for memory, store and temp files Main use case is +manage memory usage.{html} +h4. Properties + || Property Name || Type || Description || + | adapter | _[org.apache.activemq.store.PersistenceAdapter|#org.apache.activemq.store.PersistenceAdapter-types]_ | {html}{html} | + | executor | _java.util.concurrent.ThreadPoolExecutor_ | {html}{html} | + | memoryUsage | _[org.apache.activemq.usage.MemoryUsage|#org.apache.activemq.usage.MemoryUsage-types]_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | parent | _[org.apache.activemq.usage.SystemUsage|#org.apache.activemq.usage.SystemUsage-types]_ | {html}{html} | + | sendFailIfNoSpace | _boolean_ | {html}Sets whether or not a send() should fail if there is no space free. 
The +default value is false which means to block the send() method until space +becomes available{html} | + | sendFailIfNoSpaceAfterTimeout | _long_ | {html}{html} | + | sendFailIfNoSpaceExplicitySet | _boolean_ | {html}{html} | + | storeUsage | _[org.apache.activemq.usage.StoreUsage|#org.apache.activemq.usage.StoreUsage-types]_ | {html}{html} | + | tempStore | _[org.apache.activemq.store.kahadb.plist.PListStore|#org.apache.activemq.store.kahadb.plist.PListStore-types]_ | {html}{html} | + | tempUsage | _[org.apache.activemq.usage.TempUsage|#org.apache.activemq.usage.TempUsage-types]_ | {html}{html} | + +{anchor:taskRunnerFactory-element} +h3. The _[|#taskRunnerFactory-element]_ Element + {html}Manages the thread pool for long running tasks. Long running tasks are not +always active but when they are active, they may need a few iterations of +processing for them to become idle. The manager ensures that each task is +processes but that no one task overtakes the system. This is kinda like +cooperative multitasking.{html} +h4. Properties + || Property Name || Type || Description || + | daemon | _boolean_ | {html}{html} | + | dedicatedTaskRunner | _boolean_ | {html}{html} | + | executor | _java.util.concurrent.ExecutorService_ | {html}{html} | + | maxIterationsPerRun | _int_ | {html}{html} | + | maxThreadPoolSize | _int_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | priority | _int_ | {html}{html} | + | rejectedTaskHandler | _java.util.concurrent.RejectedExecutionHandler_ | {html}{html} | + | shutdownAwaitTermination | _long_ | {html}{html} | + +{anchor:tempDestinationAuthorizationEntry-element} +h3. The _[|#tempDestinationAuthorizationEntry-element]_ Element + {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destination{html} +h4. 
Properties + || Property Name || Type || Description || + | admin | _java.lang.String_ | {html}{html} | + | adminACLs | (_java.lang.Object_)\* | {html}{html} | + | destination | _[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_ | {html}{html} | + | groupClass | _java.lang.String_ | {html}{html} | + | queue | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | read | _java.lang.String_ | {html}{html} | + | readACLs | (_java.lang.Object_)\* | {html}{html} | + | tempQueue | _boolean_ | {html}{html} | + | tempTopic | _boolean_ | {html}{html} | + | topic | _java.lang.String_ | {html}A helper method to set the destination from a configuration file{html} | + | write | _java.lang.String_ | {html}{html} | + | writeACLs | (_java.lang.Object_)\* | {html}{html} | + +{anchor:tempQueue-element} +h3. The _[|#tempQueue-element]_ Element + {html}An ActiveMQ Temporary Queue Destination{html} +h4. Properties + || Property Name || Type || Description || + | compositeDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | connection | _org.apache.activemq.ActiveMQConnection_ | {html}{html} | + | connectionId | _java.lang.String_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | physicalName | _java.lang.String_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + | sequenceId | _long_ | {html}{html} | + +{anchor:tempTopic-element} +h3. The _[|#tempTopic-element]_ Element + {html}An ActiveMQ Temporary Topic Destination{html} +h4. 
Properties + || Property Name || Type || Description || + | compositeDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | connection | _org.apache.activemq.ActiveMQConnection_ | {html}{html} | + | connectionId | _java.lang.String_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | physicalName | _java.lang.String_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + | sequenceId | _long_ | {html}{html} | + +{anchor:tempUsage-element} +h3. The _[|#tempUsage-element]_ Element + {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} +h4. Properties + || Property Name || Type || Description || + | executor | _java.util.concurrent.ThreadPoolExecutor_ | {html}{html} | + | limit | _java.lang.String_ | {html}Sets the memory limit in bytes. Setting the limit in bytes will set the +usagePortion to 0 since the UsageManager is not going to be portion based +off the parent. +When set using Xbean, values of the form "20 Mb", "1024kb", and "1g" can be used{html} | + | limiter | _[org.apache.activemq.usage.UsageCapacity|#org.apache.activemq.usage.UsageCapacity-types]_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | parent | _T_ | {html}{html} | + | percentUsage | _int_ | {html}{html} | + | percentUsageMinDelta | _java.lang.String_ | {html}Sets the minimum number of percentage points the usage has to change +before a UsageListener event is fired by the manager.{html} | + | pollingTime | _int_ | {html}{html} | + | store | _[org.apache.activemq.store.kahadb.plist.PListStore|#org.apache.activemq.store.kahadb.plist.PListStore-types]_ | {html}{html} | + | usagePortion | _float_ | {html}{html} | + +{anchor:timeStampingBrokerPlugin-element} +h3. 
The _[|#timeStampingBrokerPlugin-element]_ Element + {html}A Broker interceptor which updates a JMS Client's timestamp on the message +with a broker timestamp. Useful when the clocks on client machines are known +to not be correct and you can only trust the time set on the broker machines. + +Enabling this plugin will break JMS compliance since the timestamp that the +producer sees on the messages after a send() will be different from the +timestamp the consumer will observe when he receives the message. This plugin +is not enabled in the default ActiveMQ configuration. + +2 new attributes have been added which will allow the administrator some override control +over the expiration time for incoming messages: + +Attribute 'zeroExpirationOverride' can be used to apply an expiration +time to incoming messages with no expiration defined (messages that would never expire) + +Attribute 'ttlCeiling' can be used to apply a limit to the expiration time{html} +h4. Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | futureOnly | _boolean_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | processNetworkMessages | _java.lang.Boolean_ | {html}{html} | + | ttlCeiling | _long_ | {html}setter method for ttlCeiling{html} | + | zeroExpirationOverride | _long_ | {html}setter method for zeroExpirationOverride{html} | + +{anchor:timedSubscriptionRecoveryPolicy-element} +h3. The _[|#timedSubscriptionRecoveryPolicy-element]_ Element + {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a timed +buffer of messages around in memory and use that to recover new +subscriptions.{html} +h4. 
Properties + || Property Name || Type || Description || + | broker | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | recoverDuration | _long_ | {html}{html} | + +{anchor:topic-element} +h3. The _[|#topic-element]_ Element + {html}An ActiveMQ Topic{html} +h4. Properties + || Property Name || Type || Description || + | compositeDestinations | (_[org.apache.activemq.command.ActiveMQDestination|#org.apache.activemq.command.ActiveMQDestination-types]_)\* | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | physicalName | _java.lang.String_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + +{anchor:traceBrokerPathPlugin-element} +h3. The _[|#traceBrokerPathPlugin-element]_ Element + {html}The TraceBrokerPathPlugin can be used in a network of Brokers. Each Broker +that has the plugin configured, will add it's brokerName to the content +of a JMS Property. If all Brokers have this property enabled, the path the +message actually took through the network can be seen in the defined property.{html} +h4. Properties + || Property Name || Type || Description || + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | stampProperty | _java.lang.String_ | {html}{html} | + +{anchor:transact-database-locker-element} +h3. The _[|#transact-database-locker-element]_ Element + {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} +h4. 
Properties + || Property Name || Type || Description || + | exceptionHandler | _org.apache.activemq.util.Handler_ | {html}{html} | + | failIfLocked | _boolean_ | {html}{html} | + | lockAcquireSleepInterval | _long_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | queryTimeout | _int_ | {html}{html} | + +{anchor:transact-jdbc-adapter-element} +h3. The _[|#transact-jdbc-adapter-element]_ Element + {html}A JDBC Adapter for Transact-SQL based databases such as SQL Server or Sybase{html} +h4. Properties + || Property Name || Type || Description || + | batchStatments | _boolean_ | {html}{html} | + | maxRows | _int_ | {html}{html} | + | statements | _[org.apache.activemq.store.jdbc.Statements|#org.apache.activemq.store.jdbc.Statements-types]_ | {html}{html} | + | useExternalMessageReferences | _boolean_ | {html}{html} | + +{anchor:transportConnector-element} +h3. The _[|#transportConnector-element]_ Element + {html}{html} +h4. Properties + || Property Name || Type || Description || + | auditNetworkProducers | _boolean_ | {html}Enable a producer audit on network connections, Traps the case of a missing send reply and resend. 
+Note: does not work with conduit=false, networked composite destinations or networked virtual topics{html} | + | brokerInfo | _org.apache.activemq.command.BrokerInfo_ | {html}{html} | + | brokerService | _[org.apache.activemq.broker.BrokerService|#org.apache.activemq.broker.BrokerService-types]_ | {html}This is called by the BrokerService right before it starts the transport.{html} | + | disableAsyncDispatch | _boolean_ | {html}{html} | + | discoveryAgent | _org.apache.activemq.transport.discovery.DiscoveryAgent_ | {html}{html} | + | discoveryUri | _java.net.URI_ | {html}{html} | + | enableStatusMonitor | _boolean_ | {html}{html} | + | maximumConsumersAllowedPerConnection | _int_ | {html}{html} | + | maximumProducersAllowedPerConnection | _int_ | {html}{html} | + | messageAuthorizationPolicy | _org.apache.activemq.security.MessageAuthorizationPolicy_ | {html}Sets the policy used to decide if the current connection is authorized to +consume a given message{html} | + | name | _java.lang.String_ | {html}{html} | + | rebalanceClusterClients | _boolean_ | {html}{html} | + | server | _org.apache.activemq.transport.TransportServer_ | {html}{html} | + | taskRunnerFactory | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | updateClusterClients | _boolean_ | {html}{html} | + | updateClusterClientsOnRemove | _boolean_ | {html}{html} | + | updateClusterFilter | _java.lang.String_ | {html}{html} | + | uri | _java.net.URI_ | {html}Sets the server transport URI to use if there is not a +{@link TransportServer} configured via the +{@link #setServer(TransportServer)} method. This value is used to lazy +create a {@link TransportServer} instance{html} | + +{anchor:udpTraceBrokerPlugin-element} +h3. The _[|#udpTraceBrokerPlugin-element]_ Element + {html}A Broker interceptor which allows you to trace all operations to a UDP +socket.{html} +h4. 
Properties + || Property Name || Type || Description || + | address | _java.net.SocketAddress_ | {html}{html} | + | adminConnectionContext | _org.apache.activemq.broker.ConnectionContext_ | {html}{html} | + | broadcast | _boolean_ | {html}{html} | + | destination | _java.net.URI_ | {html}{html} | + | maxTraceDatagramSize | _int_ | {html}{html} | + | next | _[org.apache.activemq.broker.Broker|#org.apache.activemq.broker.Broker-types]_ | {html}{html} | + | wireFormat | _org.apache.activemq.wireformat.WireFormat_ | {html}{html} | + | wireFormatFactory | _org.apache.activemq.wireformat.WireFormatFactory_ | {html}{html} | + +{anchor:uniquePropertyMessageEvictionStrategy-element} +h3. The _[|#uniquePropertyMessageEvictionStrategy-element]_ Element + {html}An eviction strategy which evicts the oldest message within messages with the same property value{html} +h4. Properties + || Property Name || Type || Description || + | evictExpiredMessagesHighWatermark | _int_ | {html}Sets the high water mark on which we will eagerly evict expired messages from RAM{html} | + | propertyName | _java.lang.String_ | {html}{html} | + +{anchor:usageCapacity-element} +h3. The _[|#usageCapacity-element]_ Element + {html}Identify if a limit has been reached{html} +h4. Properties + || Property Name || Type || Description || + | limit | _long_ | {html}{html} | + +{anchor:virtualDestinationInterceptor-element} +h3. The _[|#virtualDestinationInterceptor-element]_ Element + {html}Implements Virtual Topics.{html} +h4. Properties + || Property Name || Type || Description || + | virtualDestinations | (_[org.apache.activemq.broker.region.virtual.VirtualDestination|#org.apache.activemq.broker.region.virtual.VirtualDestination-types]_)\* | {html}{html} | + +{anchor:virtualSelectorCacheBrokerPlugin-element} +h3. The _[|#virtualSelectorCacheBrokerPlugin-element]_ Element + {html}A plugin which allows the caching of the selector from a subscription queue. +

    +This stops the build-up of unwanted messages, especially when consumers may +disconnect from time to time when using virtual destinations. +

    +This is influenced by code snippets developed by Maciej Rakowicz{html} +h4. Properties + || Property Name || Type || Description || + | persistFile | _java.io.File_ | {html}Sets the location of the persistent cache{html} | + +{anchor:virtualTopic-element} +h3. The _[|#virtualTopic-element]_ Element + {html}Creates Virtual +Topics using a prefix and postfix. The virtual destination creates a +wildcard that is then used to look up all active queue subscriptions which +match.{html} +h4. Properties + || Property Name || Type || Description || + | local | _boolean_ | {html}{html} | + | name | _java.lang.String_ | {html}{html} | + | postfix | _java.lang.String_ | {html}Sets any postix used to identify the queue consumers{html} | + | prefix | _java.lang.String_ | {html}Sets the prefix wildcard used to identify the queue consumers for a given +topic{html} | + | selectorAware | _boolean_ | {html}Indicates whether the selectors of consumers are used to determine dispatch +to a virtual destination, when true only messages matching an existing +consumer will be dispatched.{html} | + +{anchor:vmCursor-element} +h3. The _[|#vmCursor-element]_ Element + {html}Pending messages held{html} + +{anchor:vmDurableCursor-element} +h3. The _[|#vmDurableCursor-element]_ Element + {html}Pending{html} + +{anchor:vmQueueCursor-element} +h3. The _[|#vmQueueCursor-element]_ Element + {html}Pending messages{html} + +{anchor:xaConnectionFactory-element} +h3. The _[|#xaConnectionFactory-element]_ Element + {html}A Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} +h4. Properties + || Property Name || Type || Description || + | alwaysSessionAsync | _boolean_ | {html}If this flag is not set then a separate thread is not used for dispatching messages for each Session in +the Connection. 
However, a separate thread is always used if there is more than one session, or the session +isn't in auto acknowledge or duplicates ok mode. By default this value is set to true and session dispatch +happens asynchronously.{html} | + | alwaysSyncSend | _boolean_ | {html}Set true if always require messages to be sync sent{html} | + | auditDepth | _int_ | {html}{html} | + | auditMaximumProducerNumber | _int_ | {html}{html} | + | beanName | _java.lang.String_ | {html}{html} | + | blobTransferPolicy | _org.apache.activemq.blob.BlobTransferPolicy_ | {html}Sets the policy used to describe how out-of-band BLOBs (Binary Large +OBjects) are transferred from producers to brokers to consumers{html} | + | brokerURL | _java.lang.String_ | {html}Sets the connection +URL used to connect to the ActiveMQ broker.{html} | + | checkForDuplicates | _boolean_ | {html}{html} | + | clientID | _java.lang.String_ | {html}Sets the JMS clientID to use for the created connection. Note that this +can only be used by one connection at once so generally its a better idea +to set the clientID on a Connection{html} | + | clientIDPrefix | _java.lang.String_ | {html}Sets the prefix used by autogenerated JMS Client ID values which are used +if the JMS client does not explicitly specify on.{html} | + | clientIdGenerator | _org.apache.activemq.util.IdGenerator_ | {html}{html} | + | clientInternalExceptionListener | _org.apache.activemq.ClientInternalExceptionListener_ | {html}Allows an {@link ClientInternalExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this clientInternalExceptionListener will not be serialized if it is associated with more than +one connection (as it will be if more than one connection is subsequently created by this connection factory){html} | + | closeTimeout | _int_ | {html}Sets the timeout before a close is considered complete. Normally a +close() on a connection waits for confirmation from the broker; this +allows that operation to timeout to save the client hanging if there is +no broker{html} | + | connectionIDPrefix | _java.lang.String_ | {html}Sets the prefix used by connection id generator{html} | + | connectionIdGenerator | _org.apache.activemq.util.IdGenerator_ | {html}{html} | + | consumerFailoverRedeliveryWaitPeriod | _long_ | {html}{html} | + | copyMessageOnSend | _boolean_ | {html}Should a JMS message be copied to a new JMS Message object as part of the +send() method in JMS. This is enabled by default to be compliant with the +JMS specification. You can disable it if you do not mutate JMS messages +after they are sent for a performance boost{html} | + | disableTimeStampsByDefault | _boolean_ | {html}Sets whether or not timestamps on messages should be disabled or not. If +you disable them it adds a small performance boost.{html} | + | dispatchAsync | _boolean_ | {html}Enables or disables the default setting of whether or not consumers have +their messages dispatched +synchronously or asynchronously by the broker. For non-durable +topics for example we typically dispatch synchronously by default to +minimize context switches which boost performance. 
However sometimes its +better to go slower to ensure that a single blocked consumer socket does +not block delivery to other consumers.{html} | + | exceptionListener | _[javax.jms.ExceptionListener|#javax.jms.ExceptionListener-types]_ | {html}Allows an {@link ExceptionListener} to be configured on the ConnectionFactory so that when this factory +is used by frameworks which don't expose the Connection such as Spring JmsTemplate, you can register +an exception listener. +

    Note: access to this exceptionListener will not be serialized if it is associated with more than +one connection (as it will be if more than one connection is subsequently created by this connection factory){html} | + | exclusiveConsumer | _boolean_ | {html}Enables or disables whether or not queue consumers should be exclusive or +not for example to preserve ordering when not using Message Groups{html} | + | maxThreadPoolSize | _int_ | {html}{html} | + | messagePrioritySupported | _boolean_ | {html}{html} | + | nestedMapAndListEnabled | _boolean_ | {html}Enables/disables whether or not Message properties and MapMessage entries +support Nested +Structures of Map and List objects{html} | + | nonBlockingRedelivery | _boolean_ | {html}When true a MessageConsumer will not stop Message delivery before re-delivering Messages +from a rolled back transaction. This implies that message order will not be preserved and +also will result in the TransactedIndividualAck option to be enabled.{html} | + | objectMessageSerializationDefered | _boolean_ | {html}When an object is set on an ObjectMessage, the JMS spec requires the +object to be serialized by that set method. Enabling this flag causes the +object to not get serialized. The object may subsequently get serialized +if the message needs to be sent over a socket or stored to disk.{html} | + | optimizeAcknowledge | _boolean_ | {html}{html} | + | optimizeAcknowledgeTimeOut | _long_ | {html}The max time in milliseconds between optimized ack batches{html} | + | optimizedAckScheduledAckInterval | _long_ | {html}Gets the configured time interval that is used to force all MessageConsumers that have optimizedAcknowledge enabled +to send an ack for any outstanding Message Acks. 
By default this value is set to zero meaning that the consumers +will not do any background Message acknowledgment.{html} | + | optimizedMessageDispatch | _boolean_ | {html}If this flag is set then an larger prefetch limit is used - only +applicable for durable topic subscribers.{html} | + | password | _java.lang.String_ | {html}Sets the JMS password used for connections created from this factory{html} | + | prefetchPolicy | _[org.apache.activemq.ActiveMQPrefetchPolicy|#org.apache.activemq.ActiveMQPrefetchPolicy-types]_ | {html}Sets the prefetch +policy for consumers created by this connection.{html} | + | producerWindowSize | _int_ | {html}{html} | + | properties | _java.util.Properties_ | {html}Get the properties from this instance for storing in JNDI{html} | + | redeliveryPolicy | _[org.apache.activemq.RedeliveryPolicy|#org.apache.activemq.RedeliveryPolicy-types]_ | {html}Sets the global default redelivery policy to be used when a message is delivered +but the session is rolled back{html} | + | redeliveryPolicyMap | _[org.apache.activemq.broker.region.policy.RedeliveryPolicyMap|#org.apache.activemq.broker.region.policy.RedeliveryPolicyMap-types]_ | {html}Sets the global redelivery policy mapping to be used when a message is delivered +but the session is rolled back{html} | + | rejectedTaskHandler | _java.util.concurrent.RejectedExecutionHandler_ | {html}{html} | + | sendAcksAsync | _boolean_ | {html}{html} | + | sendTimeout | _int_ | {html}{html} | + | sessionTaskRunner | _[org.apache.activemq.thread.TaskRunnerFactory|#org.apache.activemq.thread.TaskRunnerFactory-types]_ | {html}{html} | + | statsEnabled | _boolean_ | {html}{html} | + | transactedIndividualAck | _boolean_ | {html}when true, submit individual transacted acks immediately rather than with transaction completion. 
+This allows the acks to represent delivery status which can be persisted on rollback +Used in conjunction with org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter#setRewriteOnRedelivery(boolean) true{html} | + | transformer | _org.apache.activemq.MessageTransformer_ | {html}Sets the transformer used to transform messages before they are sent on +to the JMS bus or when they are received from the bus but before they are +delivered to the JMS client{html} | + | transportListener | _org.apache.activemq.transport.TransportListener_ | {html}Allows a listener to be configured on the ConnectionFactory so that when this factory is used +with frameworks which don't expose the Connection such as Spring JmsTemplate, you can still register +a transport listener.{html} | + | useAsyncSend | _boolean_ | {html}Forces the use of Async Sends which +adds a massive performance boost; but means that the send() method will +return immediately whether the message has been sent or not which could +lead to message loss.{html} | + | useBeanNameAsClientIdPrefix | _boolean_ | {html}{html} | + | useCompression | _boolean_ | {html}Enables the use of compression of the message bodies{html} | + | useDedicatedTaskRunner | _boolean_ | {html}{html} | + | useRetroactiveConsumer | _boolean_ | {html}Sets whether or not retroactive consumers are enabled. Retroactive +consumers allow non-durable topic subscribers to receive old messages +that were published before the non-durable subscriber started.{html} | + | userName | _java.lang.String_ | {html}Sets the JMS userName used by connections created by this factory{html} | + | warnAboutUnstartedConnectionTimeout | _long_ | {html}Enables the timeout from a connection creation to when a warning is +generated if the connection is not properly started via +{@link Connection#start()} and a message is received by a consumer. 
It is +a very common gotcha to forget to start +the connection so this option makes the default case to create a +warning if the user forgets. To disable the warning just set the value to < +0 (say -1).{html} | + | watchTopicAdvisories | _boolean_ | {html}{html} | + +h3. Element Index + | _[|#abortSlowConsumerStrategy-element]_ | {html}Abort slow consumers when they reach the configured threshold of slowness, default is slow for 30 seconds{html} | + | _[|#amqPersistenceAdapter-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} | + | _[|#amqPersistenceAdapterFactory-element]_ | {html}An implementation of {@link PersistenceAdapterFactory}{html} | + | _[|#authenticationUser-element]_ | {html}A helper object used to configure simple authentication plugin{html} | + | _[|#authorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a specific +destination or a hierarchical wildcard area of destinations.{html} | + | _[|#authorizationMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies. Each entry in the map represents the authorization ACLs +for each operation.{html} | + | _[|#authorizationPlugin-element]_ | {html}An authorization plugin where each operation on a destination is checked +against an authorizationMap{html} | + | _[|#axionJDBCAdapter-element]_ | {html}Axion specific Adapter. + +Axion does not seem to support ALTER statements or sub-selects. 
This means: +- We cannot auto upgrade the schema as we roll out new versions of ActiveMQ +- We cannot delete durable sub messages that have been acknowledged by all consumers.{html} | + | _[|#blobJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the getBlob()/setBlob() +operations. This is a little more involved since to insert a blob you have +to: + +1: insert empty blob. 2: select the blob 3: finally update the blob with data +value. + +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#broker-element]_ | {html}An ActiveMQ Message Broker. It consists of a number of transport +connectors, network connectors and a bunch of properties which can be used to +configure the broker as its lazily created.{html} | + | _[|#brokerService-element]_ | {html}Manages the lifecycle of an ActiveMQ Broker. A BrokerService consists of a +number of transport connectors, network connectors and a bunch of properties +which can be used to configure the broker as its lazily created.{html} | + | _[|#bytesJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the +setBytes()/getBytes() operations. The databases/JDBC drivers that use this +adapter are:{html} | + | _[|#cachedLDAPAuthorizationMap-element]_ | {html}A {@link DefaultAuthorizationMap} implementation which uses LDAP to initialize and update authorization +policy.{html} | + | _[|#commandAgent-element]_ | {html}An agent which listens to commands on a JMS destination{html} | + | _[|#compositeDemandForwardingBridge-element]_ | {html}A demand forwarding bridge which works with multicast style transports where +a single Transport could be communicating with multiple remote brokers{html} | + | _[|#compositeQueue-element]_ | {html}Represents a virtual queue which forwards to a number of other destinations.{html} | + | _[|#compositeTopic-element]_ | {html}Represents a virtual topic which forwards to a number of other destinations.{html} | + | _[|#conditionalNetworkBridgeFilterFactory-element]_ | {html}implement conditional behaviour for queue consumers, +allows replaying back to origin if no consumers are present on the local broker +after a configurable delay, irrespective of the networkTTL +Also allows rate limiting of messages through the network, useful for static includes{html} | + | _[|#connectionDotFilePlugin-element]_ | {html}A DOT file creator plugin which +creates a DOT file showing the current connections{html} | + | _[|#connectionFactory-element]_ | {html}A 
Spring enhanced connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + | _[|#constantPendingMessageLimitStrategy-element]_ | {html}This PendingMessageLimitStrategy is configured to a constant value for all subscriptions.{html} | + | _[|#database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#db2JDBCAdapter-element]_ | {html}{html} | + | _[|#defaultIOExceptionHandler-element]_ | {html}{html} | + | _[|#defaultJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used by the JDBCPersistenceAdapter.

    sub-classing is +encouraged to override the default implementation of methods to account for differences in JDBC Driver +implementations.

    The JDBCAdapter inserts and extracts BLOB data using the getBytes()/setBytes() operations.

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#defaultNetworkBridgeFilterFactory-element]_ | {html}implement default behaviour, filter that will not allow resend to origin +based on brokerPath and which respects networkTTL{html} | + | _[|#defaultUsageCapacity-element]_ | {html}Identify if a limit has been reached{html} | + | _[|#demandForwardingBridge-element]_ | {html}Forwards messages from the local broker to the remote broker based on demand.{html} | + | _[|#destinationDotFilePlugin-element]_ | {html}A DOT +file creator plugin which creates a DOT file showing the current topic & queue hierarchies.{html} | + | _[|#destinationEntry-element]_ | {html}A default entry in a DestinationMap which holds a single value.{html} | + | _[|#destinationPathSeparatorPlugin-element]_ | {html}{html} | + | _[|#discardingDLQBrokerPlugin-element]_ | {html}{html} | + | _[|#fileCursor-element]_ | {html}Pending messages{html} | + | _[|#fileDurableSubscriberCursor-element]_ | {html}Pending messages for durable subscribers{html} | + | _[|#fileQueueCursor-element]_ | {html}Pending{html} | + | _[|#filteredDestination-element]_ | {html}Represents a destination which is filtered using some predicate such as a selector +so that messages are only dispatched to the destination if they match the filter.{html} | + | _[|#filteredKahaDB-element]_ | {html}{html} | + | _[|#fixedCountSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +count of last messages.{html} | + | _[|#fixedSizedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a fixed +amount of memory available in RAM for message history which is evicted in +time order.{html} | + | _[|#forcePersistencyModeBroker-element]_ | {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. 
+ +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} | + | _[|#forcePersistencyModeBrokerPlugin-element]_ | {html}A Plugin which allows to force every incoming message to be PERSISTENT or NON-PERSISTENT. + +Useful, if you have set the broker usage policy to process ONLY persistent or ONLY non-persistent +messages.{html} | + | _[|#forwardingBridge-element]_ | {html}Forwards all messages from the local broker to the remote broker.{html} | + | _[|#hsqldb-jdbc-adapter-element]_ | {html}{html} | + | _[|#imageBasedJDBCAdaptor-element]_ | {html}Provides JDBCAdapter since that uses +IMAGE datatype to hold binary data. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Sybase
    • +
    • MS SQL
    • +
    {html} | + | _[|#inboundQueueBridge-element]_ | {html}Create an Inbound Queue Bridge. By default this class uses the same name for +both the inbound and outbound queue. This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound queue names +separately.{html} | + | _[|#inboundTopicBridge-element]_ | {html}Create an Inbound Topic Bridge. By default this class uses the topic name for +both the inbound and outbound topic. This behavior can be overridden however +by using the setter methods to configure both the inbound and outbound topic names +separately.{html} | + | _[|#individualDeadLetterStrategy-element]_ | {html}A {@link DeadLetterStrategy} where each destination has its own individual +DLQ using the subject naming hierarchy.{html} | + | _[|#informixJDBCAdapter-element]_ | {html}JDBC Adapter for Informix database. +Because Informix database restricts length of composite primary keys, length of +container name field and subscription id field must be reduced to 150 characters. +Therefore be sure not to use longer names for container name and subscription id than 150 characters.{html} | + | _[|#jDBCIOExceptionHandler-element]_ | {html}{html} | + | _[|#jaasAuthenticationPlugin-element]_ | {html}Provides a JAAS based authentication plugin{html} | + | _[|#jaasCertificateAuthenticationPlugin-element]_ | {html}Provides a JAAS based SSL certificate authentication plugin{html} | + | _[|#jaasDualAuthenticationPlugin-element]_ | {html}Provides a JAAS based authentication plugin{html} | + | _[|#jdbcPersistenceAdapter-element]_ | {html}A {@link PersistenceAdapter} implementation using JDBC for persistence +storage. 
+ +This persistence adapter will correctly remember prepared XA transactions, +but it will not keep track of local transaction commits so that operations +performed against the Message store are done as a single uow.{html} | + | _[|#jmsQueueConnector-element]_ | {html}A Bridge to other JMS Queue providers{html} | + | _[|#jmsTopicConnector-element]_ | {html}A Bridge to other JMS Topic providers{html} | + | _[|#journalPersistenceAdapter-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with a +{@link Journal} and then check pointing asynchronously on a timeout with some +other long term persistent storage.{html} | + | _[|#journalPersistenceAdapterFactory-element]_ | {html}Factory class that can create PersistenceAdapter objects.{html} | + | _[|#journaledJDBC-element]_ | {html}Creates a default persistence model using the Journal and JDBC{html} | + | _[|#kahaDB-element]_ | {html}An implementation of {@link PersistenceAdapter} designed for use with +KahaDB - Embedded Lightweight Non-Relational Database{html} | + | _[|#kahaPersistenceAdapter-element]_ | {html}{html} | + | _[|#lDAPAuthorizationMap-element]_ | {html}An {@link AuthorizationMap} which uses LDAP{html} | + | _[|#lastImageSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will only keep the +last message.{html} | + | _[|#ldapNetworkConnector-element]_ | {html}class to create dynamic network connectors listed in an directory +server using the LDAP v3 protocol as defined in RFC 2251, the +entries listed in the directory server must implement the ipHost +and ipService objectClasses as defined in RFC 2307.{html} | + | _[|#lease-database-locker-element]_ | {html}Represents an exclusive lease on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#levelDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with +LevelDB - Embedded 
Lightweight Non-Relational Database{html} | + | _[|#loggingBrokerPlugin-element]_ | {html}A simple Broker intercepter which allows you to enable/disable logging.{html} | + | _[|#mKahaDB-element]_ | {html}An implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports +distribution of destinations across multiple kahaDB persistence adapters{html} | + | _[|#managementContext-element]_ | {html}An abstraction over JMX mbean registration{html} | + | _[|#masterConnector-element]_ | {html}Connects a Slave Broker to a Master when using Master Slave for High +Availability of messages.{html} | + | _[|#maxdb-jdbc-adapter-element]_ | {html}JDBC Adapter for the MaxDB database.{html} | + | _[|#memoryPersistenceAdapter-element]_ | {html}{html} | + | _[|#memoryUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + | _[|#messageGroupHashBucketFactory-element]_ | {html}A factory to create instances of {@link SimpleMessageGroupMap} when +implementing the Message +Groups functionality.{html} | + | _[|#mirroredQueue-element]_ | {html}Creates Mirrored +Queue using a prefix and postfix to define the topic name on which to mirror the queue to.{html} | + | _[|#multicastNetworkConnector-element]_ | {html}A network connector which uses some kind of multicast-like transport that +communicates with potentially many remote brokers over a single logical +{@link Transport} instance such as when using multicast. 
+ +This implementation does not depend on multicast at all; any other group +based transport could be used.{html} | + | _[|#multicastTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a Multicast +socket.{html} | + | _[|#mysql-jdbc-adapter-element]_ | {html}{html} | + | _[|#networkConnector-element]_ | {html}A network connector which uses a discovery agent to detect the remote brokers +available and setup a connection to each available remote broker{html} | + | _[|#noSubscriptionRecoveryPolicy-element]_ | {html}This SubscriptionRecoveryPolicy disable recovery of messages.{html} | + | _[|#oldestMessageEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message first (which is the +default).{html} | + | _[|#oldestMessageWithLowestPriorityEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message with the lowest priority first.{html} | + | _[|#oracleBlobJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#oracleJDBCAdapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    {html} | + | _[|#outboundQueueBridge-element]_ | {html}Create an Outbound Queue Bridge. By default the bridge uses the same +name for both the inbound and outbound queues, however this can be altered +by using the public setter methods to configure both inbound and outbound +queue names.{html} | + | _[|#outboundTopicBridge-element]_ | {html}Create an Outbound Topic Bridge. By default the bridge uses the same +name for both the inbound and outbound topics, however this can be altered +by using the public setter methods to configure both inbound and outbound +topic names.{html} | + | _[|#pListStore-element]_ | {html}{html} | + | _[|#policyEntry-element]_ | {html}Represents an entry in a {@link PolicyMap} for assigning policies to a +specific destination or a hierarchical wildcard area of destinations.{html} | + | _[|#policyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + | _[|#postgresql-jdbc-adapter-element]_ | {html}Implements all the default JDBC operations that are used +by the JDBCPersistenceAdapter. +

    +Subclassing is encouraged to override the default +implementation of methods to account for differences +in JDBC Driver implementations. +

    +The JDBCAdapter inserts and extracts BLOB data using the +getBytes()/setBytes() operations. +

    +The databases/JDBC drivers that use this adapter are: +

      +
    • +
    {html} | + | _[|#prefetchPolicy-element]_ | {html}Defines the prefetch message policies for different types of consumers{html} | + | _[|#prefetchRatePendingMessageLimitStrategy-element]_ | {html}This PendingMessageLimitStrategy sets the maximum pending message limit value to be +a multiplier of the prefetch limit of the subscription.{html} | + | _[|#priorityNetworkDispatchPolicy-element]_ | {html}dispatch policy that ignores lower priority duplicate network consumers, +used in conjunction with network bridge suppresDuplicateTopicSubscriptions{html} | + | _[|#proxyConnector-element]_ | {html}{html} | + | _[|#queryBasedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will perform a user +specific query mechanism to load any messages they may have missed.{html} | + | _[|#queue-element]_ | {html}An ActiveMQ Queue{html} | + | _[|#queueDispatchSelector-element]_ | {html}Queue dispatch policy that determines if a message can be sent to a subscription{html} | + | _[|#reconnectionPolicy-element]_ | {html}A policy object that defines how a {@link JmsConnector} deals with +reconnection of the local and foreign connections.{html} | + | _[|#redeliveryPlugin-element]_ | {html}Replace regular DLQ handling with redelivery via a resend to the original destination +after a delay +A destination matching RedeliveryPolicy controls the quantity and delay for re-sends +If there is no matching policy or an existing policy limit is exceeded by default +regular DLQ processing resumes. This is controlled via sendToDlqIfMaxRetriesExceeded +and fallbackToDeadLetter{html} | + | _[|#redeliveryPolicy-element]_ | {html}Configuration options for a messageConsumer used to control how messages are re-delivered when they +are rolled back. 
+May be used server side on a per destination basis via the Broker RedeliveryPlugin{html} | + | _[|#redeliveryPolicyMap-element]_ | {html}Represents a destination based configuration of policies so that individual +destinations or wildcard hierarchies of destinations can be configured using +different policies.{html} | + | _[|#roundRobinDispatchPolicy-element]_ | {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} | + | _[|#shared-file-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#sharedDeadLetterStrategy-element]_ | {html}A default implementation of {@link DeadLetterStrategy} which uses +a constant destination.{html} | + | _[|#simpleAuthenticationPlugin-element]_ | {html}Provides a simple authentication plugin{html} | + | _[|#simpleAuthorizationMap-element]_ | {html}An AuthorizationMap which is configured with individual DestinationMaps for +each operation.{html} | + | _[|#simpleDispatchPolicy-element]_ | {html}Simple dispatch policy that sends a message to every subscription that +matches the message.{html} | + | _[|#simpleDispatchSelector-element]_ | {html}Simple dispatch policy that determines if a message can be sent to a subscription{html} | + | _[|#simpleJmsMessageConvertor-element]_ | {html}Converts Message from one JMS to another{html} | + | _[|#simpleMessageGroupMapFactory-element]_ | {html}A factory to create instances of {@link SimpleMessageGroupMap} when implementing the +Message Groups functionality.{html} | + | _[|#sslContext-element]_ | {html}Extends the SslContext so that it's easier to configure from spring.{html} | + | _[|#statements-element]_ | {html}{html} | + | _[|#statisticsBrokerPlugin-element]_ | {html}A StatisticsBrokerPlugin +You can retrieve a Map Message for a Destination - or +Broker containing statistics as key-value pairs The message must contain a +replyTo Destination - else 
its ignored +To retrieve stats on the broker send a empty message to ActiveMQ.Statistics.Broker (Queue or Topic) +With a replyTo set to the destination you want the stats returned to. +To retrieve stats for a destination - e.g. foo - send an empty message to ActiveMQ.Statistics.Destination.foo +- this works with wildcards to - you get a message for each wildcard match on the replyTo destination. +The stats message is a MapMessage populated with statistics for the target{html} | + | _[|#storeCursor-element]_ | {html}Pending messages{html} | + | _[|#storeDurableSubscriberCursor-element]_ | {html}Pending messages for a durable{html} | + | _[|#storeUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + | _[|#streamJDBCAdapter-element]_ | {html}This JDBCAdapter inserts and extracts BLOB data using the +setBinaryStream()/getBinaryStream() operations. + +The databases/JDBC drivers that use this adapter are: +
      +
    • Axion
    • +
    {html} | + | _[|#strictOrderDispatchPolicy-element]_ | {html}Dispatch policy that causes every subscription to see messages in the same +order.{html} | + | _[|#sybase-jdbc-adapter-element]_ | {html}A JDBC Adapter for Sybase databases{html} | + | _[|#systemUsage-element]_ | {html}Holder for Usage instances for memory, store and temp files. Main use case is +manage memory usage.{html} | + | _[|#taskRunnerFactory-element]_ | {html}Manages the thread pool for long running tasks. Long running tasks are not +always active but when they are active, they may need a few iterations of +processing for them to become idle. The manager ensures that each task is +processed but that no one task overtakes the system. This is kinda like +cooperative multitasking.{html} | + | _[|#tempDestinationAuthorizationEntry-element]_ | {html}Represents an entry in a {@link DefaultAuthorizationMap} for assigning +different operations (read, write, admin) of user roles to a temporary +destination{html} | + | _[|#tempQueue-element]_ | {html}An ActiveMQ Temporary Queue Destination{html} | + | _[|#tempTopic-element]_ | {html}An ActiveMQ Temporary Topic Destination{html} | + | _[|#tempUsage-element]_ | {html}Used to keep track of how much of something is being used so that a +productive working set usage can be controlled. Main use case is manage +memory usage.{html} | + | _[|#timeStampingBrokerPlugin-element]_ | {html}A Broker interceptor which updates a JMS Client's timestamp on the message +with a broker timestamp. Useful when the clocks on client machines are known +to not be correct and you can only trust the time set on the broker machines. + +Enabling this plugin will break JMS compliance since the timestamp that the +producer sees on the messages after a send() will be different from the +timestamp the consumer will observe when he receives the message. This plugin +is not enabled in the default ActiveMQ configuration. 
+ +2 new attributes have been added which will allow the administrator some override control +over the expiration time for incoming messages: + +Attribute 'zeroExpirationOverride' can be used to apply an expiration +time to incoming messages with no expiration defined (messages that would never expire) + +Attribute 'ttlCeiling' can be used to apply a limit to the expiration time{html} | + | _[|#timedSubscriptionRecoveryPolicy-element]_ | {html}This implementation of {@link SubscriptionRecoveryPolicy} will keep a timed +buffer of messages around in memory and use that to recover new +subscriptions.{html} | + | _[|#topic-element]_ | {html}An ActiveMQ Topic{html} | + | _[|#traceBrokerPathPlugin-element]_ | {html}The TraceBrokerPathPlugin can be used in a network of Brokers. Each Broker +that has the plugin configured, will add its brokerName to the content +of a JMS Property. If all Brokers have this property enabled, the path the +message actually took through the network can be seen in the defined property.{html} | + | _[|#transact-database-locker-element]_ | {html}Represents an exclusive lock on a database to avoid multiple brokers running +against the same logical database.{html} | + | _[|#transact-jdbc-adapter-element]_ | {html}A JDBC Adapter for Transact-SQL based databases such as SQL Server or Sybase{html} | + | _[|#transportConnector-element]_ | {html}{html} | + | _[|#udpTraceBrokerPlugin-element]_ | {html}A Broker interceptor which allows you to trace all operations to a UDP +socket.{html} | + | _[|#uniquePropertyMessageEvictionStrategy-element]_ | {html}An eviction strategy which evicts the oldest message within messages with the same property value{html} | + | _[|#usageCapacity-element]_ | {html}Identify if a limit has been reached{html} | + | _[|#virtualDestinationInterceptor-element]_ | {html}Implements Virtual Topics.{html} | + | _[|#virtualSelectorCacheBrokerPlugin-element]_ | {html}A plugin which allows the caching of the selector from a subscription 
queue. +

    +This stops the build-up of unwanted messages, especially when consumers may +disconnect from time to time when using virtual destinations. +

    +This is influenced by code snippets developed by Maciej Rakowicz{html} | + | _[|#virtualTopic-element]_ | {html}Creates Virtual +Topics using a prefix and postfix. The virtual destination creates a +wildcard that is then used to look up all active queue subscriptions which +match.{html} | + | _[|#vmCursor-element]_ | {html}Pending messages held{html} | + | _[|#vmDurableCursor-element]_ | {html}Pending{html} | + | _[|#vmQueueCursor-element]_ | {html}Pending messages{html} | + | _[|#xaConnectionFactory-element]_ | {html}A Spring enhanced XA connection +factory which will automatically use the Spring bean name as the clientIDPrefix property +so that connections created have client IDs related to your Spring.xml file for +easier comprehension from JMX.{html} | + diff --git a/activemq-leveldb/kahadb-vs-leveldb.png b/activemq-leveldb/kahadb-vs-leveldb.png new file mode 100644 index 0000000000..e4d9c57783 Binary files /dev/null and b/activemq-leveldb/kahadb-vs-leveldb.png differ diff --git a/activemq-leveldb/pom.xml b/activemq-leveldb/pom.xml new file mode 100644 index 0000000000..5464fb676e --- /dev/null +++ b/activemq-leveldb/pom.xml @@ -0,0 +1,434 @@ + + + + + 4.0.0 + + + org.apache.activemq + activemq-parent + 5.7-SNAPSHOT + + + activemq-leveldb + jar + + ActiveMQ :: LevelDB + ActiveMQ LevelDB based store + + + + + + org.scala-lang + scala-library + ${scala-version} + compile + + + + org.apache.activemq + activemq-core + 5.7-SNAPSHOT + provided + + + + org.fusesource.hawtbuf + hawtbuf-proto + ${hawtbuf-version} + + + + org.fusesource.hawtdispatch + hawtdispatch-scala + ${hawtdispatch-version} + + + + org.iq80.leveldb + leveldb + 0.2 + + + + org.fusesource.leveldbjni + leveldbjni-osx + 1.3 + + + org.fusesource.leveldbjni + leveldbjni-linux32 + 1.3 + + + org.fusesource.leveldbjni + leveldbjni-linux64 + 1.3 + + + org.fusesource.leveldbjni + leveldbjni-win32 + 1.3 + + + org.fusesource.leveldbjni + leveldbjni-win64 + 1.3 + + + + + org.xerial.snappy + snappy-java + 
1.0.3 + + + + org.iq80.snappy + snappy + 0.2 + true + + + + org.codehaus.jackson + jackson-core-asl + ${jackson-version} + + + org.codehaus.jackson + jackson-mapper-asl + ${jackson-version} + + + + org.apache.hadoop + hadoop-core + ${hadoop-version} + + + + commons-cli + commons-cli + + + xmlenc + xmlenc + + + commons-codec + commons-codec + + + org.apache.commons + commons-math + + + commons-net + commons-net + + + commons-httpclient + commons-httpclient + + + tomcat + jasper-runtime + + + tomcat + jasper-compiler + + + commons-el + commons-el + + + net.java.dev.jets3t + jets3t + + + net.sf.kosmosfs + kfs + + + hsqldb + hsqldb + + + oro + oro + + + org.eclipse.jdt + core + + + + + + + org.apache.activemq + activemq-core + 5.7-SNAPSHOT + test-jar + test + + + org.apache.activemq + activemq-console + 5.7-SNAPSHOT + test + + + + + org.apache.hadoop + hadoop-test + ${hadoop-version} + test + + + commons-lang + commons-lang + 2.6 + test + + + org.mortbay.jetty + jetty + 6.1.26 + test + + + org.mortbay.jetty + jetty-util + 6.1.26 + test + + + tomcat + jasper-runtime + 5.5.12 + test + + + tomcat + jasper-compiler + 5.5.12 + test + + + org.mortbay.jetty + jsp-api-2.1 + 6.1.14 + test + + + org.mortbay.jetty + jsp-2.1 + 6.1.14 + test + + + org.apache.commons + commons-math + 2.2 + test + + + + org.scalatest + scalatest_2.9.1 + ${scalatest-version} + test + + + junit + junit + test + + + + + + + + + org.scala-tools + maven-scala-plugin + ${scala-plugin-version} + + + compile + compile + compile + + + test-compile + + testCompile + + test-compile + + + process-resources + + compile + + + + + + + -Xmx1024m + -Xss8m + + ${scala-version} + + -deprecation + + + + org.fusesource.jvmassert + jvmassert + 1.1 + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + + false + + false + true + false + + + + org.fusesource.hawtbuf + hawtbuf-protoc + ${hawtbuf-version} + + alt + + + + + compile + + + + + + org.fusesource.mvnplugins + maven-uberize-plugin + 1.14 + + + all + 
package + uberize + + + + true + uber + + + org.scala-lang:scala-library + org.fusesource.hawtdispatch:hawtdispatch + org.fusesource.hawtdispatch:hawtdispatch-scala + org.fusesource.hawtbuf:hawtbuf + org.fusesource.hawtbuf:hawtbuf-proto + + org.iq80.leveldb:leveldb-api + + + org.xerial.snappy:snappy-java + + org.fusesource.leveldbjni:leveldbjni + org.fusesource.leveldbjni:leveldbjni-osx + org.fusesource.leveldbjni:leveldbjni-linux32 + org.fusesource.leveldbjni:leveldbjni-linux64 + org.fusesource.hawtjni:hawtjni-runtime + + + org.apache.hadoop:hadoop-core + commons-configuration:commons-configuration + org.codehaus.jackson:jackson-mapper-asl + org.codehaus.jackson:jackson-core-asl + + + + + + + org.apache.felix + maven-bundle-plugin + + bundle + + + ${project.groupId}.${project.artifactId} + org.apache.activemq.activemq-core + + org.apache.activemq.leveldb*;version=${project.version};-noimport:=;-split-package:=merge-last, + + *;inline=**;artifactId= + hawtjni-runtime|hawtbuf|hawtbuf-proto|hawtdispatch|hawtdispatch-scala|scala-library| + leveldb-api|leveldbjni|leveldbjni-osx|leveldbjni-linux32|leveldbjni-linux64| + hadoop-core|commons-configuration|jackson-mapper-asl|jackson-core-asl|commons-lang + true + *;resolution:=optional + + + + + bundle + package + + bundle + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + always + + **/EnqueueRateScenariosTest.* + + + + + + diff --git a/activemq-leveldb/readme.md b/activemq-leveldb/readme.md new file mode 100644 index 0000000000..2a4d9384be --- /dev/null +++ b/activemq-leveldb/readme.md @@ -0,0 +1,95 @@ +# The LevelDB Store + +## Overview + +The LevelDB Store is message store implementation that can be used in ActiveMQ messaging servers. + +## LevelDB vs KahaDB + +How is the LevelDB Store better than the default KahaDB store: + + * It maitains fewer index entries per message than KahaDB which means it has a higher persistent throughput. 
+ * Faster recovery when a broker restarts + * Since the broker tends to write and read queue entries sequentially, the LevelDB based indexes provide a much better performance than the B-Tree based indexes of KahaDB which increases throughput. + * Unlike the KahaDB indexes, the LevelDB indexes support concurrent read access which further improves read throughput. + * Pauseless data log file garbage collection cycles. + * It uses fewer read IO operations to load stored messages. + * If a message is copied to multiple queues (Typically happens if you're using virtual topics with multiple + consumers), then LevelDB will only journal the payload of the message once. KahaDB will journal it multiple times. + * It exposes its status via JMX for monitoring + * Supports replication to get High Availability + +See the following chart to get an idea of how much better you can expect the LevelDB store to perform vs the KahaDB store: + +![kahadb-vs-leveldb.png ](https://raw.github.com/fusesource/fuse-extra/master/fusemq-leveldb/kahadb-vs-leveldb.png) + +## How to Use with ActiveMQ + +Update the broker configuration file and change `persistenceAdapter` elements +settings so that it uses the LevelDB store using the following spring XML +configuration example: + + + + + +### Configuration / Property Reference + +*TODO* + +### JMX Attribute and Operation Reference + +*TODO* + +## Known Limitations + +* XA Transactions not supported yet +* The store does not do any dup detection of messages. + +## Built in High Availability Support + +You can also use a High Availability (HA) version of the LevelDB store which +works with Hadoop based file systems to achieve HA of your stored messages. + +**Q:** What are the requirements? +**A:** An existing Hadoop 1.0.0 cluster + +**Q:** How does it work during the normal operating cycle? +**A:** It uses HDFS to store a highly available copy of the local leveldb storage files. 
As local log files are being written to, it also maintains a mirror copy on HDFS. If you have sync enabled on the store, a HDFS file sync is performed instead of a local disk sync. When the index is check pointed, we upload any previously not uploaded leveldb .sst files to HDFS. + +**Q:** What happens when a broker fails and we startup a new slave to take over? +**A:** The slave will download from HDFS the log files and the .sst files associated with the latest uploaded index. Then normal leveldb store recovery kicks in which updates the index using the log files. + +**Q:** How do I use the HA version of the LevelDB store? +**A:** Update your activemq.xml to use a `persistenceAdapter` setting similar to the following: + + + + + + + + + + + + + + + + Notice the implementation class name changes to 'HALevelDBStore' + Instead of using a 'dfsUrl' property you can instead also just load an existing Hadoop configuration file if it's available on your system, for example: + + +**Q:** Who handles starting up the Slave? +**A:** You do. :) This implementation assumes master startup/elections are performed externally and that 2 brokers are never running against the same HDFS file path. In practice this means you need something like ZooKeeper to control starting new brokers to take over failed masters. + +**Q:** Can this run against something other than HDFS? +**A:** It should be able to run with any Hadoop supported file system like CloudStore, S3, MapR, NFS, etc (Well at least in theory, I've only tested against HDFS). + +**Q:** Can 'X' performance be optimized? +**A:** There are bunch of way to improve the performance of many of the things that current version of the store is doing. For example, aggregating the .sst files into an archive to make more efficient use of HDFS, concurrent downloading to improve recovery performance. Lazy downloading of the oldest log files to make recovery faster. Async HDFS writes to avoid blocking local updates. 
Running brokers in a warm 'standby' mode which keeps downloading new log updates and applying index updates from the master as they get uploaded to HDFS to get faster failovers. + +**Q:** Does the broker fail if HDFS fails? +**A:** Currently, yes. But it should be possible to make the master resilient to HDFS failures. diff --git a/activemq-core/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java b/activemq-leveldb/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java similarity index 95% rename from activemq-core/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java rename to activemq-leveldb/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java index bd5c937905..987808e79a 100644 --- a/activemq-core/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java +++ b/activemq-leveldb/src/main/java/org/apache/activemq/store/leveldb/LevelDBPersistenceAdapter.java @@ -16,7 +16,7 @@ */ package org.apache.activemq.store.leveldb; -import org.fusesource.mq.leveldb.LevelDBStore; +import org.apache.activemq.leveldb.LevelDBStore; /** diff --git a/activemq-leveldb/src/main/proto/records.proto b/activemq-leveldb/src/main/proto/records.proto new file mode 100644 index 0000000000..d7ec58d356 --- /dev/null +++ b/activemq-leveldb/src/main/proto/records.proto @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package org.apache.activemq.leveldb.record; + +option java_multiple_files = true; + +// +// We create a collection record for each +// transaction, queue, topic. +// +message CollectionKey { + required int64 key = 1; +} +message CollectionRecord { + optional int64 key = 1; + optional int32 type = 2; + optional bytes meta = 3 [java_override_type = "Buffer"]; +} + +// +// We create a entry record for each message, subscription, +// and subscription position. +// +message EntryKey { + required int64 collection_key = 1; + required bytes entry_key = 2 [java_override_type = "Buffer"]; +} +message EntryRecord { + optional int64 collection_key = 1; + optional bytes entry_key = 2 [java_override_type = "Buffer"]; + optional int64 value_location = 3; + optional int32 value_length = 4; + optional bytes value = 5 [java_override_type = "Buffer"]; + optional bytes meta = 6 [java_override_type = "Buffer"]; +} + +message SubscriptionRecord { + optional int64 topic_key = 1; + optional string client_id = 2; + optional string subscription_name = 3; + optional string selector = 4; + optional string destination_name = 5; +} \ No newline at end of file diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb.scala new file mode 100644 index 0000000000..5774105f87 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb.scala @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq + +import java.nio.ByteBuffer +import org.fusesource.hawtbuf.Buffer +import org.xerial.snappy.{Snappy => Xerial} +import org.iq80.snappy.{Snappy => Iq80} + +/** + *

    + * A Snappy abstraction which attempts to use the iq80 implementation and falls back + * to the xerial Snappy implementation if it cannot be loaded. You can change the + * load order by setting the 'leveldb.snappy' system property. Example: + * + * + * -Dleveldb.snappy=xerial,iq80 + * + * + * The system property can also be configured with the name of a class which + * implements the Snappy.SPI interface. + *

    + * + * @author Hiram Chirino + */ +package object leveldb { + + final val Snappy = { + var attempt:SnappyTrait = null + System.getProperty("leveldb.snappy", "iq80,xerial").split(",").foreach { x => + if( attempt==null ) { + try { + var name = x.trim(); + name = name.toLowerCase match { + case "xerial" => "org.apache.activemq.leveldb.XerialSnappy" + case "iq80" => "org.apache.activemq.leveldb.IQ80Snappy" + case _ => name + } + attempt = Thread.currentThread().getContextClassLoader().loadClass(name).newInstance().asInstanceOf[SnappyTrait]; + attempt.compress("test") + } catch { + case x => + attempt = null + } + } + } + attempt + } + + + trait SnappyTrait { + + def uncompressed_length(input: Buffer):Int + def uncompress(input: Buffer, output:Buffer): Int + + def max_compressed_length(length: Int): Int + def compress(input: Buffer, output: Buffer): Int + + def compress(input: Buffer):Buffer = { + val compressed = new Buffer(max_compressed_length(input.length)) + compressed.length = compress(input, compressed) + compressed + } + + def compress(text: String): Buffer = { + val uncompressed = new Buffer(text.getBytes("UTF-8")) + val compressed = new Buffer(max_compressed_length(uncompressed.length)) + compressed.length = compress(uncompressed, compressed) + return compressed + } + + def uncompress(input: Buffer):Buffer = { + val uncompressed = new Buffer(uncompressed_length(input)) + uncompressed.length = uncompress(input, uncompressed) + uncompressed + } + + def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer): Int = { + val input = if (compressed.hasArray) { + new Buffer(compressed.array, compressed.arrayOffset + compressed.position, compressed.remaining) + } else { + val t = new Buffer(compressed.remaining) + compressed.mark + compressed.get(t.data) + compressed.reset + t + } + + val output = if (uncompressed.hasArray) { + new Buffer(uncompressed.array, uncompressed.arrayOffset + uncompressed.position, uncompressed.capacity()-uncompressed.position) + } 
else { + new Buffer(uncompressed_length(input)) + } + + output.length = uncompress(input, output) + + if (uncompressed.hasArray) { + uncompressed.limit(uncompressed.position + output.length) + } else { + val p = uncompressed.position + uncompressed.limit(uncompressed.capacity) + uncompressed.put(output.data, output.offset, output.length) + uncompressed.flip.position(p) + } + return output.length + } + } +} +package leveldb { + class XerialSnappy extends SnappyTrait { + override def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer) = Xerial.uncompress(compressed, uncompressed) + def uncompressed_length(input: Buffer) = Xerial.uncompressedLength(input.data, input.offset, input.length) + def uncompress(input: Buffer, output: Buffer) = Xerial.uncompress(input.data, input.offset, input.length, output.data, output.offset) + def max_compressed_length(length: Int) = Xerial.maxCompressedLength(length) + def compress(input: Buffer, output: Buffer) = Xerial.compress(input.data, input.offset, input.length, output.data, output.offset) + override def compress(text: String) = new Buffer(Xerial.compress(text)) + } + + class IQ80Snappy extends SnappyTrait { + def uncompressed_length(input: Buffer) = Iq80.getUncompressedLength(input.data, input.offset) + def uncompress(input: Buffer, output: Buffer): Int = Iq80.uncompress(input.data, input.offset, input.length, output.data, output.offset) + def compress(input: Buffer, output: Buffer): Int = Iq80.compress(input.data, input.offset, input.length, output.data, output.offset) + def max_compressed_length(length: Int) = Iq80.maxCompressedLength(length) + } +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/DBManager.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/DBManager.scala new file mode 100644 index 0000000000..30b134ac4b --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/DBManager.scala @@ -0,0 +1,735 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb + +import org.fusesource.hawtdispatch._ +import org.fusesource.hawtdispatch.BaseRetained +import java.util.concurrent._ +import atomic._ +import org.fusesource.hawtbuf.Buffer +import org.apache.activemq.store.MessageRecoveryListener +import java.lang.ref.WeakReference +import scala.Option._ +import org.fusesource.hawtbuf.Buffer._ +import org.apache.activemq.command._ +import org.apache.activemq.leveldb.record.{SubscriptionRecord, CollectionRecord} +import util.TimeMetric +import java.util.HashMap +import collection.mutable.{HashSet, ListBuffer} +import org.apache.activemq.thread.DefaultThreadPools + +case class MessageRecord(id:MessageId, data:Buffer, syncNeeded:Boolean) { + var locator:(Long, Int) = _ +} + +case class QueueEntryRecord(id:MessageId, queueKey:Long, queueSeq:Long) +case class QueueRecord(id:ActiveMQDestination, queue_key:Long) +case class QueueEntryRange() +case class SubAckRecord(subKey:Long, ackPosition:Long) + +sealed trait UowState { + def stage:Int +} +// UoW is initial open. +object UowOpen extends UowState { + override def stage = 0 + override def toString = "UowOpen" +} +// UoW is Committed once the broker finished creating it. 
+object UowClosed extends UowState { + override def stage = 1 + override def toString = "UowClosed" +} +// UOW is delayed until we send it to get flushed. +object UowDelayed extends UowState { + override def stage = 2 + override def toString = "UowDelayed" +} +object UowFlushQueued extends UowState { + override def stage = 3 + override def toString = "UowFlushQueued" +} + +object UowFlushing extends UowState { + override def stage = 4 + override def toString = "UowFlushing" +} +// Then it moves on to be flushed. Flushed just +// means the message has been written to disk +// and out of memory +object UowFlushed extends UowState { + override def stage = 5 + override def toString = "UowFlushed" +} + +// Once completed then you know it has been synced to disk. +object UowCompleted extends UowState { + override def stage = 6 + override def toString = "UowCompleted" +} + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +case class CountDownFuture(completed:CountDownLatch=new CountDownLatch(1)) extends java.util.concurrent.Future[Object] { + def countDown = completed.countDown() + def cancel(mayInterruptIfRunning: Boolean) = false + def isCancelled = false + + def get() = { + completed.await() + null + } + + def get(p1: Long, p2: TimeUnit) = { + if(completed.await(p1, p2)) { + null + } else { + throw new TimeoutException + } + } + + def isDone = completed.await(0, TimeUnit.SECONDS); +} + +object UowManagerConstants { + val QUEUE_COLLECTION_TYPE = 1 + val TOPIC_COLLECTION_TYPE = 2 + val TRANSACTION_COLLECTION_TYPE = 3 + val SUBSCRIPTION_COLLECTION_TYPE = 4 + + case class QueueEntryKey(queue:Long, seq:Long) + def key(x:QueueEntryRecord) = QueueEntryKey(x.queueKey, x.queueSeq) +} + +import UowManagerConstants._ + +class DelayableUOW(val manager:DBManager) extends BaseRetained { + val countDownFuture = CountDownFuture() + var canceled = false; + + val uowId:Int = manager.lastUowId.incrementAndGet() + var actions = Map[MessageId, MessageAction]() + var subAcks = ListBuffer[SubAckRecord]() + var completed = false + var disableDelay = false + var delayableActions = 0 + + private var _state:UowState = UowOpen + + def state = this._state + def state_=(next:UowState) { + assert(this._state.stage < next.stage) + this._state = next + } + + def syncNeeded = actions.find( _._2.syncNeeded ).isDefined + def size = 100+actions.foldLeft(0L){ case (sum, entry) => + sum + (entry._2.size+100) + } + (subAcks.size * 100) + + class MessageAction { + var id:MessageId = _ + var messageRecord: MessageRecord = null + var enqueues = ListBuffer[QueueEntryRecord]() + var dequeues = ListBuffer[QueueEntryRecord]() + + def uow = DelayableUOW.this + def isEmpty() = messageRecord==null && enqueues==Nil && dequeues==Nil + + def cancel() = { + uow.rm(id) + } + + def syncNeeded = messageRecord!=null && messageRecord.syncNeeded + def size = (if(messageRecord!=null) 
messageRecord.data.length+20 else 0) + ((enqueues.size+dequeues.size)*50) + + def addToPendingStore() = { + var set = manager.pendingStores.get(id) + if(set==null) { + set = HashSet() + manager.pendingStores.put(id, set) + } + set.add(this) + } + + def removeFromPendingStore() = { + var set = manager.pendingStores.get(id) + if(set!=null) { + set.remove(this) + if(set.isEmpty) { + manager.pendingStores.remove(id) + } + } + } + + } + + def completeAsap() = this.synchronized { disableDelay=true } + def delayable = !disableDelay && delayableActions>0 && manager.flushDelay>=0 + + def rm(msg:MessageId) = { + actions -= msg + if( actions.isEmpty && state.stage < UowFlushing.stage ) { + cancel + } + } + + def cancel = { + manager.dispatchQueue.assertExecuting() + manager.uowCanceledCounter += 1 + canceled = true + manager.flush_queue.remove(uowId) + onCompleted + } + + def getAction(id:MessageId) = { + actions.get(id) match { + case Some(x) => x + case None => + val x = new MessageAction + x.id = id + actions += id->x + x + } + } + + def updateAckPosition(sub:DurableSubscription) = { + subAcks += SubAckRecord(sub.subKey, sub.lastAckPosition) + } + + def enqueue(queueKey:Long, queueSeq:Long, message:Message, delay_enqueue:Boolean) = { + var delay = delay_enqueue && message.getTransactionId==null + if(delay ) { + manager.uowEnqueueDelayReqested += 1 + } else { + manager.uowEnqueueNodelayReqested += 1 + } + + val id = message.getMessageId + + + val messageRecord = id.getDataLocator match { + case null => + var packet = manager.parent.wireFormat.marshal(message) + var data = new Buffer(packet.data, packet.offset, packet.length) + if( manager.snappyCompressLogs ) { + data = Snappy.compress(data) + } + val record = MessageRecord(id, data, message.isResponseRequired) + id.setDataLocator(record) + record + case record:MessageRecord => + record + case x:(Long, Int) => + null + } + + val entry = QueueEntryRecord(id, queueKey, queueSeq) + assert(id.getEntryLocator == null) + 
id.setEntryLocator((queueKey, queueSeq)) + + val a = this.synchronized { + if( !delay ) + disableDelay = true + + val action = getAction(entry.id) + action.messageRecord = messageRecord + action.enqueues += entry + delayableActions += 1 + action + } + + manager.dispatchQueue { + manager.cancelable_enqueue_actions.put(key(entry), a) + a.addToPendingStore() + } + countDownFuture + } + + def dequeue(queueKey:Long, id:MessageId) = { + val (queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[(Long, Long)]; + val entry = QueueEntryRecord(id, queueKey, queueSeq) + this.synchronized { + getAction(id).dequeues += entry + } + countDownFuture + } + + def complete_asap = this.synchronized { + disableDelay=true + if( state eq UowDelayed ) { + manager.enqueueFlush(this) + } + } + + var complete_listeners = ListBuffer[()=>Unit]() + def addCompleteListener(func: =>Unit) = { + complete_listeners.append( func _ ) + } + + var asyncCapacityUsed = 0L + var disposed_at = 0L + + override def dispose = this.synchronized { + state = UowClosed + disposed_at = System.nanoTime() + if( !syncNeeded ) { + val s = size + if( manager.asyncCapacityRemaining.addAndGet(-s) > 0 ) { + asyncCapacityUsed = s + countDownFuture.countDown + DefaultThreadPools.getDefaultTaskRunnerFactory.execute(^{ + complete_listeners.foreach(_()) + }) + } else { + manager.asyncCapacityRemaining.addAndGet(s) + } + } + // closeSource.merge(this) + manager.dispatchQueue { + manager.processClosed(this) + } + } + + def onCompleted() = this.synchronized { + if ( state.stage < UowCompleted.stage ) { + state = UowCompleted + if( asyncCapacityUsed != 0 ) { + manager.asyncCapacityRemaining.addAndGet(asyncCapacityUsed) + asyncCapacityUsed = 0 + } else { + manager.uow_complete_latency.add(System.nanoTime() - disposed_at) + countDownFuture.countDown + DefaultThreadPools.getDefaultTaskRunnerFactory.execute(^{ + complete_listeners.foreach(_()) + }) + } + + for( (id, action) <- actions ) { + if( !action.enqueues.isEmpty ) { + 
action.removeFromPendingStore() + } + for( queueEntry <- action.enqueues ) { + manager.cancelable_enqueue_actions.remove(key(queueEntry)) + } + } + super.dispose + } + } +} + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class DBManager(val parent:LevelDBStore) { + + var lastCollectionKey = new AtomicLong(0) + val client:LevelDBClient = parent.createClient + + def writeExecutor = client.writeExecutor + def flushDelay = parent.flushDelay + + val dispatchQueue = createQueue(toString) +// val aggregator = new AggregatingExecutor(dispatchQueue) + + val asyncCapacityRemaining = new AtomicLong(0L) + + def createUow() = new DelayableUOW(this) + + var uowEnqueueDelayReqested = 0L + var uowEnqueueNodelayReqested = 0L + var uowClosedCounter = 0L + var uowCanceledCounter = 0L + var uowStoringCounter = 0L + var uowStoredCounter = 0L + + val uow_complete_latency = TimeMetric() + +// val closeSource = createSource(new ListEventAggregator[DelayableUOW](), dispatchQueue) +// closeSource.setEventHandler(^{ +// closeSource.getData.foreach { uow => +// processClosed(uow) +// } +// }); +// closeSource.resume + + var pendingStores = new ConcurrentHashMap[MessageId, HashSet[DelayableUOW#MessageAction]]() + + var cancelable_enqueue_actions = new HashMap[QueueEntryKey, DelayableUOW#MessageAction]() + + val lastUowId = new AtomicInteger(1) + + def processClosed(uow:DelayableUOW) = { + dispatchQueue.assertExecuting() + uowClosedCounter += 1 + + // Broker could issue a flush_message call before + // this stage runs.. which make the stage jump over UowDelayed + if( uow.state.stage < UowDelayed.stage ) { + uow.state = UowDelayed + } + if( uow.state.stage < UowFlushing.stage ) { + uow.actions.foreach { case (id, action) => + + // The UoW may have been canceled. 
+ if( action.messageRecord!=null && action.enqueues.isEmpty ) { + action.removeFromPendingStore() + action.messageRecord = null + uow.delayableActions -= 1 + } + if( action.isEmpty ) { + action.cancel() + } + + // dequeues can cancel out previous enqueues + action.dequeues.foreach { entry=> + val entry_key = key(entry) + val prev_action:DelayableUOW#MessageAction = cancelable_enqueue_actions.remove(entry_key) + + if( prev_action!=null ) { + val prev_uow = prev_action.uow + prev_uow.synchronized { + if( !prev_uow.canceled ) { + + prev_uow.delayableActions -= 1 + + // yay we can cancel out a previous enqueue + prev_action.enqueues = prev_action.enqueues.filterNot( x=> key(x) == entry_key ) + if( prev_uow.state.stage >= UowDelayed.stage ) { + + // if the message is not in any queues.. we can gc it.. + if( prev_action.enqueues == Nil && prev_action.messageRecord !=null ) { + prev_action.removeFromPendingStore() + prev_action.messageRecord = null + prev_uow.delayableActions -= 1 + } + + // Cancel the action if it's now empty + if( prev_action.isEmpty ) { + prev_action.cancel() + } else if( !prev_uow.delayable ) { + // flush it if there is no point in delaying anymore + prev_uow.complete_asap + } + } + } + } + // since we canceled out the previous enqueue.. now cancel out the action + action.dequeues = action.dequeues.filterNot( _ == entry) + if( action.isEmpty ) { + action.cancel() + } + } + } + } + } + + if( !uow.canceled && uow.state.stage < UowFlushQueued.stage ) { + if( uow.delayable ) { + // Let the uow get GCed if its' canceled during the delay window.. 
+ val ref = new WeakReference[DelayableUOW](uow) + scheduleFlush(ref) + } else { + enqueueFlush(uow) + } + } + } + + private def scheduleFlush(ref: WeakReference[DelayableUOW]) { + dispatchQueue.executeAfter(flushDelay, TimeUnit.MILLISECONDS, ^ { + val uow = ref.get(); + if (uow != null) { + enqueueFlush(uow) + } + }) + } + + val flush_queue = new java.util.LinkedHashMap[Long, DelayableUOW]() + + def enqueueFlush(uow:DelayableUOW) = { + dispatchQueue.assertExecuting() + if( uow!=null && !uow.canceled && uow.state.stage < UowFlushQueued.stage ) { + uow.state = UowFlushQueued + flush_queue.put (uow.uowId, uow) + flushSource.merge(1) + } + } + + val flushSource = createSource(EventAggregators.INTEGER_ADD, dispatchQueue) + flushSource.setEventHandler(^{drainFlushes}); + flushSource.resume + + def drainFlushes:Unit = { + dispatchQueue.assertExecuting() + if( !started ) { + return + } + + // Some UOWs may have been canceled. + import collection.JavaConversions._ + val values = flush_queue.values().toSeq.toArray + flush_queue.clear() + + val uows = values.flatMap { uow=> + if( uow.canceled ) { + None + } else { + // It will not be possible to cancel the UOW anymore.. 
+ uow.state = UowFlushing + uow.actions.foreach { case (_, action) => + action.enqueues.foreach { queue_entry=> + val action = cancelable_enqueue_actions.remove(key(queue_entry)) + assert(action!=null) + } + } + Some(uow) + } + } + + if( !uows.isEmpty ) { + uowStoringCounter += uows.size + flushSource.suspend + writeExecutor { + client.store(uows) + flushSource.resume + dispatchQueue { + uowStoredCounter += uows.size + uows.foreach { uow=> + uow.onCompleted + } + } + } + } + } + + var started = false + def snappyCompressLogs = parent.snappyCompressLogs + + def start = { + asyncCapacityRemaining.set(parent.asyncBufferSize) + client.start() + dispatchQueue.sync { + started = true + pollGc + if(parent.monitorStats) { + monitorStats + } + } + } + + def stop() = { + dispatchQueue.sync { + started = false + } + client.stop() + } + + def pollGc:Unit = dispatchQueue.after(10, TimeUnit.SECONDS) { + if( started ) { + val positions = parent.getTopicGCPositions + writeExecutor { + if( started ) { + client.gc(positions) + pollGc + } + } + } + } + + def monitorStats:Unit = dispatchQueue.after(1, TimeUnit.SECONDS) { + if( started ) { + println(("committed: %d, canceled: %d, storing: %d, stored: %d, " + + "uow complete: %,.3f ms, " + + "index write: %,.3f ms, " + + "log write: %,.3f ms, log flush: %,.3f ms, log rotate: %,.3f ms"+ + "add msg: %,.3f ms, add enqueue: %,.3f ms, " + + "uowEnqueueDelayReqested: %d, uowEnqueueNodelayReqested: %d " + ).format( + uowClosedCounter, uowCanceledCounter, uowStoringCounter, uowStoredCounter, + uow_complete_latency.reset, + client.max_index_write_latency.reset, + client.log.max_log_write_latency.reset, client.log.max_log_flush_latency.reset, client.log.max_log_rotate_latency.reset, + client.max_write_message_latency.reset, client.max_write_enqueue_latency.reset, + uowEnqueueDelayReqested, uowEnqueueNodelayReqested + )) + uowClosedCounter = 0 +// uowCanceledCounter = 0 + uowStoringCounter = 0 + uowStoredCounter = 0 + monitorStats + } + } + + 
  /////////////////////////////////////////////////////////////////////
  //
  // Implementation of the Store interface
  //
  /////////////////////////////////////////////////////////////////////

  /** Snapshots the index; runs on the write thread so it serializes with UOW stores. */
  def checkpoint(sync:Boolean) = writeExecutor.sync {
    client.snapshotIndex(sync)
  }

  /** Deletes all persisted data and resets the collection-key counter. */
  def purge = writeExecutor.sync {
    client.purge
    lastCollectionKey.set(1)
  }

  def getLastQueueEntrySeq(key:Long) = {
    client.getLastQueueEntrySeq(key)
  }

  def collectionEmpty(key:Long) = writeExecutor.sync {
    client.collectionEmpty(key)
  }

  def collectionSize(key:Long) = {
    client.collectionSize(key)
  }

  def collectionIsEmpty(key:Long) = {
    client.collectionIsEmpty(key)
  }

  /**
   * Feeds messages of collection `key` to the listener starting at
   * `startPos`, stopping once the listener reports it has no more space.
   * Returns the position to resume from on the next call.
   */
  def cursorMessages(key:Long, listener:MessageRecoveryListener, startPos:Long) = {
    var nextPos = startPos;
    client.queueCursor(key, nextPos) { msg =>
      if( listener.hasSpace ) {
        listener.recoverMessage(msg)
        nextPos += 1
        true
      } else {
        false
      }
    }
    nextPos
  }

  // The entry locator is a (collectionKey, position) tuple; the queue
  // position is the second element.
  def queuePosition(id: MessageId):Long = {
    id.getEntryLocator.asInstanceOf[(Long, Long)]._2
  }

  def createQueueStore(dest:ActiveMQQueue):parent.LevelDBMessageStore = {
    parent.createQueueMessageStore(dest, createStore(dest, QUEUE_COLLECTION_TYPE))
  }
  def destroyQueueStore(key:Long) = writeExecutor.sync {
    client.removeCollection(key)
  }

  def getLogAppendPosition = writeExecutor.sync {
    client.getLogAppendPosition
  }

  /**
   * Persists a durable subscription as a SUBSCRIPTION_COLLECTION_TYPE
   * collection whose meta field holds the encoded SubscriptionRecord.
   */
  def addSubscription(topic_key:Long, info:SubscriptionInfo):DurableSubscription = {
    val record = new SubscriptionRecord.Bean
    record.setTopicKey(topic_key)
    record.setClientId(info.getClientId)
    record.setSubscriptionName(info.getSubcriptionName)
    if( info.getSelector!=null ) {
      record.setSelector(info.getSelector)
    }
    if( info.getDestination!=null ) {
      record.setDestinationName(info.getDestination.getQualifiedName)
    }
    val collection = new CollectionRecord.Bean()
    collection.setType(SUBSCRIPTION_COLLECTION_TYPE)
    collection.setKey(lastCollectionKey.incrementAndGet())
    collection.setMeta(record.freeze().toUnframedBuffer)

    val buffer = collection.freeze()
    buffer.toFramedBuffer // eager encode the record.
    writeExecutor.sync {
      client.addCollection(buffer)
    }
    DurableSubscription(collection.getKey, topic_key, info)
  }

  // NOTE(review): unlike destroyQueueStore this does not go through
  // writeExecutor.sync — confirm whether the direct call is intentional.
  def removeSubscription(sub:DurableSubscription) = {
    client.removeCollection(sub.subKey)
  }

  def createTopicStore(dest:ActiveMQTopic) = {
    var key = createStore(dest, TOPIC_COLLECTION_TYPE)
    parent.createTopicMessageStore(dest, key)
  }

  /** Allocates a new collection of the given type, keyed by destination name. */
  def createStore(destination:ActiveMQDestination, collectionType:Int) = {
    val collection = new CollectionRecord.Bean()
    collection.setType(collectionType)
    collection.setMeta(utf8(destination.getQualifiedName))
    collection.setKey(lastCollectionKey.incrementAndGet())
    val buffer = collection.freeze()
    buffer.toFramedBuffer // eager encode the record.
    writeExecutor.sync {
      client.addCollection(buffer)
    }
    collection.getKey
  }

  /**
   * Rebuilds the in-memory queue/topic stores and durable subscriptions
   * from the persisted collection records, and restores the
   * collection-key counter to the highest key seen.
   */
  def loadCollections = {
    val collections = writeExecutor.sync {
      client.listCollections
    }
    var last = 0L
    collections.foreach { case (key, record) =>
      last = key
      record.getType match {
        case QUEUE_COLLECTION_TYPE =>
          val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.QUEUE_TYPE).asInstanceOf[ActiveMQQueue]
          parent.createQueueMessageStore(dest, key)
        case TOPIC_COLLECTION_TYPE =>
          val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.TOPIC_TYPE).asInstanceOf[ActiveMQTopic]
          parent.createTopicMessageStore(dest, key)
        case SUBSCRIPTION_COLLECTION_TYPE =>
          val sr = SubscriptionRecord.FACTORY.parseUnframed(record.getMeta)
          val info = new SubscriptionInfo
          info.setClientId(sr.getClientId)
          info.setSubcriptionName(sr.getSubscriptionName)
          if( sr.hasSelector ) {
            info.setSelector(sr.getSelector)
          }
          if(sr.hasDestinationName) {
            info.setSubscribedDestination(ActiveMQDestination.createDestination(sr.getDestinationName, ActiveMQDestination.TOPIC_TYPE))
          }

          var sub = DurableSubscription(key, sr.getTopicKey, info)
          sub.lastAckPosition = client.getAckPosition(key);
          parent.createSubscription(sub)
        case _ => // unknown collection type: ignored.
      }
    }
    lastCollectionKey.set(last)
  }


  /** Loads a message by id, preferring the id of a still-pending store if one exists. */
  def getMessage(x: MessageId):Message = {
    val id = Option(pendingStores.get(x)).flatMap(_.headOption).map(_.id).getOrElse(x)
    val locator = id.getDataLocator()
    val msg = client.getMessage(locator)
    msg.setMessageId(id)
    msg
  }

}
diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBClient.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBClient.scala
new file mode 100644
index 0000000000..9cb0983f51
--- /dev/null
+++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBClient.scala
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 */

package org.apache.activemq.leveldb

import org.apache.activemq.leveldb.util._

import org.fusesource.leveldbjni.internal.Util
import FileSupport._
import org.codehaus.jackson.map.ObjectMapper
import java.io._
import scala.collection.mutable._
import scala.collection.immutable.TreeMap
import org.fusesource.hawtbuf.{ByteArrayOutputStream, Buffer}
import org.apache.hadoop.fs.{FileSystem, Path}

/**
 * Small JSON codec around a shared Jackson ObjectMapper.  The context
 * classloader is swapped to this bundle's loader around each call so
 * Jackson resolves classes correctly, then restored.
 *
 * @author Hiram Chirino
 */
object JsonCodec {

  final val mapper: ObjectMapper = new ObjectMapper

  /** Decodes a JSON value of type `clazz` from the buffer. */
  def decode[T](buffer: Buffer, clazz: Class[T]): T = {
    val original = Thread.currentThread.getContextClassLoader
    Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
    try {
      return mapper.readValue(buffer.in, clazz)
    } finally {
      Thread.currentThread.setContextClassLoader(original)
    }
  }

  /** Decodes a JSON value of type `clazz` from the stream. */
  def decode[T](is: InputStream, clazz : Class[T]): T = {
    var original: ClassLoader = Thread.currentThread.getContextClassLoader
    Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
    try {
      return JsonCodec.mapper.readValue(is, clazz)
    }
    finally {
      Thread.currentThread.setContextClassLoader(original)
    }
  }


  /** Encodes a value to JSON and returns it as a Buffer. */
  def encode(value: AnyRef): Buffer = {
    var baos = new ByteArrayOutputStream
    mapper.writeValue(baos, value)
    return baos.toBuffer
  }

}

/**
 * Constants and DFS-path helpers shared by the HA client.
 *
 * @author Hiram Chirino
 */
object HALevelDBClient extends Log {

  val MANIFEST_SUFFIX = ".mf"
  val LOG_SUFFIX = LevelDBClient.LOG_SUFFIX
  val INDEX_SUFFIX = LevelDBClient.INDEX_SUFFIX


  /** Builds a DFS path named by the zero-padded hex of `id` plus `suffix`. */
  def create_sequence_path(directory:Path, id:Long, suffix:String) = new Path(directory, ("%016x%s".format(id, suffix)))

  /**
   * Lists DFS files ending in `suffix`, keyed by the log/index position
   * parsed from the hex file name; names that do not parse are skipped.
   */
  def find_sequence_status(fs:FileSystem, directory:Path, suffix:String) = {
    TreeMap((fs.listStatus(directory).flatMap { f =>
      val name = f.getPath.getName
      if( name.endsWith(suffix) ) {
        try {
          val base = name.stripSuffix(suffix)
          val position = java.lang.Long.parseLong(base, 16);
          Some(position -> f )
        } catch {
          case e:NumberFormatException => None
        }
      } else {
        None
      }
    }): _* )
  }

}

/**
 * LevelDBClient variant that mirrors log and index-snapshot files to a
 * Hadoop FileSystem (DFS) so a replacement master can recover the store.
 * Snapshots and their index files are reference counted so remote files
 * are only deleted once no snapshot refers to them.
 *
 * @author Hiram Chirino
 */
class HALevelDBClient(val store:HALevelDBStore) extends LevelDBClient(store) {
  import HALevelDBClient._

  case class Snapshot(current_manifest:String, files:Set[String])
  var snapshots = TreeMap[Long, Snapshot]()

  // Eventually we will allow warm standby slaves to add references to old
  // snapshots so that we don't delete them while they are in the process
  // of downloading the snapshot.
  var snapshotRefCounters = HashMap[Long, LongCounter]()
  var indexFileRefCounters = HashMap[String, LongCounter]()

  def dfs = store.dfs
  def dfsDirectory = new Path(store.dfsDirectory)
  def dfsBlockSize = store.dfsBlockSize
  def dfsReplication = store.dfsReplication
  def remoteIndexPath = new Path(dfsDirectory, "index")

  /** Syncs logs/index down from the DFS, starts normally, then records the takeover. */
  override def start() = {
    retry {
      directory.mkdirs()
      dfs.mkdirs(dfsDirectory)
      downloadLogFiles
      dfs.mkdirs(remoteIndexPath)
      downloadIndexFiles
    }
    super.start()
    storeTrace("Master takeover by: "+store.containerId, true)
  }

  /** Purges local state and also removes the remote DFS directory. */
  override def locked_purge = {
    super.locked_purge
    dfs.delete(dfsDirectory, true)
  }

  /**
   * Takes the local snapshot, uploads it to the DFS, then drops the ref
   * on the previous snapshot and GCs any now-unreferenced remote files.
   */
  override def snapshotIndex(sync: Boolean) = {
    val previous_snapshot = lastIndexSnapshotPos
    super.snapshotIndex(sync)
    // upload the snapshot to the dfs
    uploadIndexFiles(lastIndexSnapshotPos)

    // Drop the previous snapshot reference..
    for( counter <- snapshotRefCounters.get(previous_snapshot)) {
      if( counter.decrementAndGet() <= 0 ) {
        snapshotRefCounters.remove(previous_snapshot)
      }
    }
    gcSnapshotRefs
  }

  // downloads missing log files...
  def downloadLogFiles {
    val log_files = find_sequence_status(dfs, dfsDirectory, LOG_SUFFIX)
    val downloads = log_files.flatMap( _ match {
      case (id, status) =>
        val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
        // is it missing or does the size not match?
        if (!target.exists() || target.length() != status.getLen) {
          Some((id, status))
        } else {
          None
        }
    })
    if( !downloads.isEmpty ) {
      val total_size = downloads.foldLeft(0L)((a,x)=> a+x._2.getLen)
      downloads.foreach {
        case (id, status) =>
          val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
          // is it missing or does the size not match?
          if (!target.exists() || target.length() != status.getLen) {
            info("Downloading log file: "+status.getPath.getName)
            using(dfs.open(status.getPath, 32*1024)) { is=>
              using(new FileOutputStream(target)) { os=>
                copy(is, os)
              }
            }
          }
      }
    }
  }

  // See if there is a more recent index that can be downloaded.
  def downloadIndexFiles {

    snapshots = TreeMap()
    dfs.listStatus(remoteIndexPath).foreach { status =>
      val name = status.getPath.getName
      indexFileRefCounters.put(name, new LongCounter())
      if( name endsWith MANIFEST_SUFFIX ) {
        info("Getting index snapshot manifest: "+status.getPath.getName)
        val mf = using(dfs.open(status.getPath)) { is =>
          JsonCodec.decode(is, classOf[IndexManifestDTO])
        }
        import collection.JavaConversions._
        snapshots += mf.snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
      }
    }

    // Check for invalid snapshots.. (a manifest naming files that are
    // not all present on the DFS is dropped)
    for( (snapshotid, snapshot) <- snapshots) {
      val matches = indexFileRefCounters.keySet & snapshot.files
      if( matches.size != snapshot.files.size ) {
        var path = create_sequence_path(remoteIndexPath, snapshotid, MANIFEST_SUFFIX)
        warn("Deleting inconsistent snapshot manifest: "+path.getName)
        dfs.delete(path, true)
        snapshots -= snapshotid
      }
    }

    // Add a ref to the last snapshot..
    for( (snapshotid, _) <- snapshots.lastOption ) {
      snapshotRefCounters.getOrElseUpdate(snapshotid, new LongCounter()).incrementAndGet()
    }

    // Increment index file refs..
    for( key <- snapshotRefCounters.keys; snapshot <- snapshots.get(key); file <- snapshot.files ) {
      indexFileRefCounters.getOrElseUpdate(file, new LongCounter()).incrementAndGet()
    }

    // Remove un-referenced index files.
    for( (name, counter) <- indexFileRefCounters ) {
      if( counter.get() <= 0 ) {
        var path = new Path(remoteIndexPath, name)
        info("Deleting unreferenced index file: "+path.getName)
        dfs.delete(path, true)
        indexFileRefCounters.remove(name)
      }
    }

    // Index files already present in local snapshot directories, by name.
    val local_snapshots = Map(LevelDBClient.find_sequence_files(directory, INDEX_SUFFIX).values.flatten { dir =>
      if( dir.isDirectory ) dir.listFiles() else Array[File]()
    }.map(x=> (x.getName, x)).toSeq:_*)

    for( (id, snapshot) <- snapshots.lastOption ) {

      // increment the ref..
      tempIndexFile.recursiveDelete
      tempIndexFile.mkdirs

      for( file <- snapshot.files ; if !file.endsWith(MANIFEST_SUFFIX) ) {
        val target = tempIndexFile / file

        // The file might be in a local snapshot already..
        local_snapshots.get(file) match {
          case Some(f) =>
            // had it locally.. link it.
            Util.link(f, target)
          case None =>
            // download..
            var path = new Path(remoteIndexPath, file)
            info("Downloading index file: "+path)
            using(dfs.open(path, 32*1024)) { is=>
              using(new FileOutputStream(target)) { os=>
                copy(is, os)
              }
            }
        }
      }

      val current = tempIndexFile / "CURRENT"
      current.writeText(snapshot.current_manifest)

      // We got everything ok, now rename.
      tempIndexFile.renameTo(LevelDBClient.create_sequence_file(directory, id, INDEX_SUFFIX))
    }

    gcSnapshotRefs
  }

  /**
   * Drops snapshots with no remaining references; decrements the refs of
   * their index files and deletes remote files that reach zero.
   */
  def gcSnapshotRefs = {
    snapshots = snapshots.filter { case (id, snapshot)=>
      if (snapshotRefCounters.get(id).isDefined) {
        true
      } else {
        for( file <- snapshot.files ) {
          for( counter <- indexFileRefCounters.get(file) ) {
            if( counter.decrementAndGet() <= 0 ) {
              var path = new Path(remoteIndexPath, file)
              // NOTE(review): "Deleteing" typo in this log message —
              // message text left unchanged here; fix in a code change.
              info("Deleteing unreferenced index file: %s", path.getName)
              dfs.delete(path, true)
              indexFileRefCounters.remove(file)
            }
          }
        }
        false
      }
    }
  }

  /**
   * Uploads the local snapshot directory `snapshot_id` to the DFS:
   * copies index files not yet remote, writes a JSON manifest, and takes
   * refs on the snapshot and each of its files.  Failures are logged and
   * swallowed (the upload is retried with the next snapshot).
   */
  def uploadIndexFiles(snapshot_id:Long):Unit = {

    val source = LevelDBClient.create_sequence_file(directory, snapshot_id, INDEX_SUFFIX)
    try {

      // Build the new manifest..
      val mf = new IndexManifestDTO
      mf.snapshot_id = snapshot_id
      mf.current_manifest = (source / "CURRENT").readText()
      source.listFiles.foreach { file =>
        val name = file.getName
        if( name !="LOCK" && name !="CURRENT") {
          mf.files.add(name)
        }
      }

      import collection.JavaConversions._
      mf.files.foreach { file =>
        val refs = indexFileRefCounters.getOrElseUpdate(file, new LongCounter())
        if(refs.get()==0) {
          // Upload if not yet on the remote.
          val target = new Path(remoteIndexPath, file)
          using(new FileInputStream(source / file)) { is=>
            using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
              copy(is, os)
            }
          }
        }
        refs.incrementAndGet()
      }

      val target = create_sequence_path(remoteIndexPath, mf.snapshot_id, MANIFEST_SUFFIX)
      mf.files.add(target.getName)

      indexFileRefCounters.getOrElseUpdate(target.getName, new LongCounter()).incrementAndGet()
      using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
        JsonCodec.mapper.writeValue(os, mf)
      }

      snapshots += snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
      snapshotRefCounters.getOrElseUpdate(snapshot_id, new LongCounter()).incrementAndGet()

    } catch {
      case e: Exception =>
        warn(e, "Could not upload the index: " + e)
    }
  }



  // Override the log appender implementation so that it
  // stores the logs on the local and remote file systems.
  override def createLog = new RecordLog(directory, LOG_SUFFIX) {


    override protected def onDelete(file: File) = {
      super.onDelete(file)
      // also delete the file on the dfs.
      dfs.delete(new Path(dfsDirectory, file.getName), false)
    }

    override def create_log_appender(position: Long) = {
      new LogAppender(next_log(position), position) {

        val dfs_path = new Path(dfsDirectory, file.getName)
        debug("Opening DFS log file for append: "+dfs_path.getName)
        val dfs_os = dfs.create(dfs_path, true, RecordLog.BUFFER_SIZE, dfsReplication.toShort, dfsBlockSize )
        debug("Opened")

        // Writes the buffered bytes to the DFS stream first, then to the
        // local channel at the matching offset.
        override def flush = this.synchronized {
          if( write_buffer.position() > 0 ) {

            var buffer: Buffer = write_buffer.toBuffer
            // Write it to DFS..
            buffer.writeTo(dfs_os.asInstanceOf[OutputStream]);

            // Now write it to the local FS.
            val byte_buffer = buffer.toByteBuffer
            val pos = append_offset-byte_buffer.remaining
            flushed_offset.addAndGet(byte_buffer.remaining)
            channel.write(byte_buffer, pos)
            if( byte_buffer.hasRemaining ) {
              throw new IOException("Short write")
            }

            write_buffer.reset()
          }
        }

        // NOTE(review): force only syncs the DFS stream, not the local
        // channel — confirm the local file is made durable elsewhere.
        override def force = {
          dfs_os.sync()
        }

        override def dispose() = {
          try {
            super.dispose()
          } finally {
            dfs_os.close()
          }
        }

      }
    }
  }
}
diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBStore.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBStore.scala
new file mode 100644
index 0000000000..afe46f0213
--- /dev/null
+++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/HALevelDBStore.scala
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.activemq.leveldb
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.activemq.util.ServiceStopper
+import org.apache.hadoop.fs.FileSystem
+import scala.reflect.BeanProperty
+import java.net.InetAddress
+
+/**
+ *

 *
 *
 * @author Hiram Chirino
 */
/**
 * LevelDBStore variant configured with a Hadoop FileSystem so that logs
 * and index snapshots are mirrored to a DFS (see HALevelDBClient).
 * The dfs* bean properties are set from the XML broker configuration.
 */
class HALevelDBStore extends LevelDBStore {

  // DFS connection URL ("fs.default.name"); defaulted from config when unset.
  @BeanProperty
  var dfsUrl:String = _
  // Optional extra Hadoop configuration resource to load.
  @BeanProperty
  var dfsConfig:String = _
  // Remote directory the store mirrors into.
  @BeanProperty
  var dfsDirectory:String = _
  @BeanProperty
  var dfsBlockSize = 1024*1024*50L
  @BeanProperty
  var dfsReplication = 1
  // Identifies this node in the takeover trace; defaults to the hostname.
  @BeanProperty
  var containerId:String = _

  var dfs:FileSystem = _

  /** Connects to the DFS (if not injected) before the normal store start. */
  override def doStart = {
    if(dfs==null) {
      // NOTE(review): the context classloader is replaced and never
      // restored here — confirm this is intentional.
      Thread.currentThread().setContextClassLoader(getClass.getClassLoader)
      val config = new Configuration()
      config.set("fs.hdfs.impl.disable.cache", "true")
      config.set("fs.file.impl.disable.cache", "true")
      Option(dfsConfig).foreach(config.addResource(_))
      Option(dfsUrl).foreach(config.set("fs.default.name", _))
      dfsUrl = config.get("fs.default.name")
      dfs = FileSystem.get(config)
    }
    if ( containerId==null ) {
      containerId = InetAddress.getLocalHost.getHostName
    }
    super.doStart
  }

  /** Stops the store, then closes the DFS connection. */
  override def doStop(stopper: ServiceStopper): Unit = {
    super.doStop(stopper)
    if(dfs!=null){
      dfs.close()
    }
  }

  override def createClient = new HALevelDBClient(this)
}
\ No newline at end of file
diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/IndexManifestDTO.java b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/IndexManifestDTO.java
new file mode 100644
index 0000000000..56b2c96dca
--- /dev/null
+++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/IndexManifestDTO.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.HashSet; +import java.util.Set; + +/** + * @author Hiram Chirino + */ +@XmlRootElement(name="index_files") +@XmlAccessorType(XmlAccessType.FIELD) +public class IndexManifestDTO { + + @XmlAttribute(name = "snapshot_id") + public long snapshot_id; + + @XmlAttribute(name = "current_manifest") + public String current_manifest; + + @XmlAttribute(name = "file") + public Set files = new HashSet(); + +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala new file mode 100755 index 0000000000..7ae1dc7e71 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBClient.scala @@ -0,0 +1,1218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb + +import java.{lang=>jl} +import java.{util=>ju} + +import java.util.concurrent.locks.ReentrantReadWriteLock +import collection.immutable.TreeMap +import collection.mutable.{HashMap, ListBuffer} +import org.iq80.leveldb._ + +import org.fusesource.hawtdispatch._ +import record.{CollectionKey, EntryKey, EntryRecord, CollectionRecord} +import util._ +import java.util.concurrent._ +import org.fusesource.hawtbuf._ +import java.io.{ObjectInputStream, ObjectOutputStream, File} +import scala.Option._ +import org.apache.activemq.command.Message +import org.apache.activemq.util.ByteSequence +import org.apache.activemq.leveldb.RecordLog.LogInfo +import java.text.SimpleDateFormat +import java.util.{Date, Collections} + +/** + * @author Hiram Chirino + */ +object LevelDBClient extends Log { + + final val STORE_SCHEMA_PREFIX = "activemq_leveldb_store:" + final val STORE_SCHEMA_VERSION = 1 + + final val THREAD_POOL_STACK_SIZE = System.getProperty("leveldb.thread.stack.size", "" + 1024 * 512).toLong + final val THREAD_POOL: ThreadPoolExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 10, TimeUnit.SECONDS, new SynchronousQueue[Runnable], new ThreadFactory { + def newThread(r: Runnable): Thread = { + var rc: Thread = new Thread(null, r, "LevelDB Store Task", THREAD_POOL_STACK_SIZE) + rc.setDaemon(true) + return rc + } + }) { + override def shutdown: Unit = {} + override def shutdownNow = Collections.emptyList[Runnable] + } + + final val DIRTY_INDEX_KEY = bytes(":dirty") + final val LOG_REF_INDEX_KEY = bytes(":log-refs") 
+ final val COLLECTION_META_KEY = bytes(":collection-meta") + final val TRUE = bytes("true") + final val FALSE = bytes("false") + final val ACK_POSITION = new AsciiBuffer("p") + + final val COLLECTION_PREFIX = 'c'.toByte + final val COLLECTION_PREFIX_ARRAY = Array(COLLECTION_PREFIX) + final val ENTRY_PREFIX = 'e'.toByte + final val ENTRY_PREFIX_ARRAY = Array(ENTRY_PREFIX) + + final val LOG_ADD_COLLECTION = 1.toByte + final val LOG_REMOVE_COLLECTION = 2.toByte + final val LOG_ADD_ENTRY = 3.toByte + final val LOG_REMOVE_ENTRY = 4.toByte + final val LOG_DATA = 5.toByte + final val LOG_TRACE = 6.toByte + + final val LOG_SUFFIX = ".log" + final val INDEX_SUFFIX = ".index" + + implicit def toByteArray(buffer:Buffer) = buffer.toByteArray + implicit def toBuffer(buffer:Array[Byte]) = new Buffer(buffer) + + def encodeCollectionRecord(v: CollectionRecord.Buffer) = v.toUnframedByteArray + def decodeCollectionRecord(data: Buffer):CollectionRecord.Buffer = CollectionRecord.FACTORY.parseUnframed(data) + def encodeCollectionKeyRecord(v: CollectionKey.Buffer) = v.toUnframedByteArray + def decodeCollectionKeyRecord(data: Buffer):CollectionKey.Buffer = CollectionKey.FACTORY.parseUnframed(data) + + def encodeEntryRecord(v: EntryRecord.Buffer) = v.toUnframedBuffer + def decodeEntryRecord(data: Buffer):EntryRecord.Buffer = EntryRecord.FACTORY.parseUnframed(data) + + def encodeEntryKeyRecord(v: EntryKey.Buffer) = v.toUnframedByteArray + def decodeEntryKeyRecord(data: Buffer):EntryKey.Buffer = EntryKey.FACTORY.parseUnframed(data) + + def encodeLocator(pos:Long, len:Int):Array[Byte] = { + val out = new DataByteArrayOutputStream( + AbstractVarIntSupport.computeVarLongSize(pos)+ + AbstractVarIntSupport.computeVarIntSize(len) + ) + out.writeVarLong(pos) + out.writeVarInt(len) + out.getData + } + def decodeLocator(bytes:Buffer):(Long, Int) = { + val in = new DataByteArrayInputStream(bytes) + (in.readVarLong(), in.readVarInt()) + } + def decodeLocator(bytes:Array[Byte]):(Long, Int) = { + val 
in = new DataByteArrayInputStream(bytes) + (in.readVarLong(), in.readVarInt()) + } + + def encodeLong(a1:Long) = { + val out = new DataByteArrayOutputStream(8) + out.writeLong(a1) + out.toBuffer + } + + def encodeVLong(a1:Long):Array[Byte] = { + val out = new DataByteArrayOutputStream( + AbstractVarIntSupport.computeVarLongSize(a1) + ) + out.writeVarLong(a1) + out.getData + } + + def decodeVLong(bytes:Array[Byte]):Long = { + val in = new DataByteArrayInputStream(bytes) + in.readVarLong() + } + + def encodeLongKey(a1:Byte, a2:Long):Array[Byte] = { + val out = new DataByteArrayOutputStream(9) + out.writeByte(a1.toInt) + out.writeLong(a2) + out.getData + } + def decodeLongKey(bytes:Array[Byte]):(Byte, Long) = { + val in = new DataByteArrayInputStream(bytes) + (in.readByte(), in.readLong()) + } + + def decodeLong(bytes:Buffer):Long = { + val in = new DataByteArrayInputStream(bytes) + in.readLong() + } + def decodeLong(bytes:Array[Byte]):Long = { + val in = new DataByteArrayInputStream(bytes) + in.readLong() + } + + def encodeEntryKey(a1:Byte, a2:Long, a3:Long):Array[Byte] = { + val out = new DataByteArrayOutputStream(17) + out.writeByte(a1.toInt) + out.writeLong(a2) + out.writeLong(a3) + out.getData + } + + def encodeEntryKey(a1:Byte, a2:Long, a3:Buffer):Array[Byte] = { + val out = new DataByteArrayOutputStream(9+a3.length) + out.writeByte(a1.toInt) + out.writeLong(a2) + out.write(a3) + out.getData + } + + def decodeEntryKey(bytes:Array[Byte]):(Byte, Long, Buffer) = { + val in = new DataByteArrayInputStream(bytes) + (in.readByte(), in.readLong(), in.readBuffer(in.available())) + } + + final class RichDB(val db: DB) { + + val isPureJavaVersion = db.getClass.getName == "org.iq80.leveldb.impl.DbImpl" + + def getProperty(name:String) = db.getProperty(name) + + def getApproximateSizes(ranges:Range*) = db.getApproximateSizes(ranges:_*) + + def get(key:Array[Byte], ro:ReadOptions=new ReadOptions):Option[Array[Byte]] = { + Option(db.get(key, ro)) + } + + def close:Unit = 
db.close() + + def delete(key:Array[Byte], wo:WriteOptions=new WriteOptions):Unit = { + db.delete(key, wo) + } + + def put(key:Array[Byte], value:Array[Byte], wo:WriteOptions=new WriteOptions):Unit = { + db.put(key, value, wo) + } + + def write[T](wo:WriteOptions=new WriteOptions, max_write_latency:TimeMetric = TimeMetric())(func: WriteBatch=>T):T = { + val updates = db.createWriteBatch() + try { + val rc=Some(func(updates)) + max_write_latency { + db.write(updates, wo) + } + return rc.get + } finally { + updates.close(); + } + } + + def store[T](write:WriteBatch, wo:WriteOptions=new WriteOptions) = { + db.write(write, wo) + } + + def snapshot[T](func: Snapshot=>T):T = { + val snapshot = db.getSnapshot + try { + func(snapshot) + } finally { + snapshot.close() + } + } + + def cursorKeys(ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { + val iterator = db.iterator(ro) + iterator.seekToFirst(); + try { + while( iterator.hasNext && func(iterator.peekNext.getKey) ) { + iterator.next() + } + } finally { + iterator.close(); + } + } + + def cursorKeysPrefixed(prefix:Array[Byte], ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { + val iterator = db.iterator(ro) + iterator.seek(prefix); + try { + def check(key:Buffer) = { + key.startsWith(prefix) && func(key) + } + while( iterator.hasNext && check(iterator.peekNext.getKey) ) { + iterator.next() + } + } finally { + iterator.close(); + } + } + + def cursorPrefixed(prefix:Array[Byte], ro:ReadOptions=new ReadOptions)(func: (Array[Byte],Array[Byte]) => Boolean): Unit = { + val iterator = db.iterator(ro) + iterator.seek(prefix); + try { + def check(key:Buffer) = { + key.startsWith(prefix) && func(key, iterator.peekNext.getValue) + } + while( iterator.hasNext && check(iterator.peekNext.getKey) ) { + iterator.next() + } + } finally { + iterator.close(); + } + } + + def compare(a1:Array[Byte], a2:Array[Byte]):Int = { + new Buffer(a1).compareTo(new Buffer(a2)) + } + + def 
cursorRangeKeys(startIncluded:Array[Byte], endExcluded:Array[Byte], ro:ReadOptions=new ReadOptions)(func: Array[Byte] => Boolean): Unit = { + val iterator = db.iterator(ro) + iterator.seek(startIncluded); + try { + def check(key:Array[Byte]) = { + if ( compare(key,endExcluded) < 0) { + func(key) + } else { + false + } + } + while( iterator.hasNext && check(iterator.peekNext.getKey) ) { + iterator.next() + } + } finally { + iterator.close(); + } + } + + def cursorRange(startIncluded:Array[Byte], endExcluded:Array[Byte], ro:ReadOptions=new ReadOptions)(func: (Array[Byte],Array[Byte]) => Boolean): Unit = { + val iterator = db.iterator(ro) + iterator.seek(startIncluded); + try { + def check(key:Array[Byte]) = { + (compare(key,endExcluded) < 0) && func(key, iterator.peekNext.getValue) + } + while( iterator.hasNext && check(iterator.peekNext.getKey) ) { + iterator.next() + } + } finally { + iterator.close(); + } + } + + def lastKey(prefix:Array[Byte], ro:ReadOptions=new ReadOptions): Option[Array[Byte]] = { + val last = new Buffer(prefix).deepCopy().data + if ( last.length > 0 ) { + val pos = last.length-1 + last(pos) = (last(pos)+1).toByte + } + + if(isPureJavaVersion) { + // The pure java version of LevelDB does not support backward iteration. 
+ var rc:Option[Array[Byte]] = None + cursorRangeKeys(prefix, last) { key=> + rc = Some(key) + true + } + rc + } else { + val iterator = db.iterator(ro) + try { + + iterator.seek(last); + if ( iterator.hasPrev ) { + iterator.prev() + } else { + iterator.seekToLast() + } + + if ( iterator.hasNext ) { + val key:Buffer = iterator.peekNext.getKey + if(key.startsWith(prefix)) { + Some(key) + } else { + None + } + } else { + None + } + } finally { + iterator.close(); + } + } + } + } + + + def bytes(value:String) = value.getBytes("UTF-8") + + import FileSupport._ + def create_sequence_file(directory:File, id:Long, suffix:String) = directory / ("%016x%s".format(id, suffix)) + + def find_sequence_files(directory:File, suffix:String):TreeMap[Long, File] = { + TreeMap((directory.listFiles.flatMap { f=> + if( f.getName.endsWith(suffix) ) { + try { + val base = f.getName.stripSuffix(suffix) + val position = java.lang.Long.parseLong(base, 16); + Some(position -> f) + } catch { + case e:NumberFormatException => None + } + } else { + None + } + }): _* ) + } + + class CollectionMeta extends Serializable { + var size = 0L + var last_key:Array[Byte] = _ + } +} + + +/** + * + * @author Hiram Chirino + */ +class LevelDBClient(store: LevelDBStore) { + + import LevelDBClient._ + import FileSupport._ + + val dispatchQueue = createQueue("leveldb") + + ///////////////////////////////////////////////////////////////////// + // + // Helpers + // + ///////////////////////////////////////////////////////////////////// + + def directory = store.directory + def logDirectory = Option(store.logDirectory).getOrElse(store.directory) + + ///////////////////////////////////////////////////////////////////// + // + // Public interface used by the DBManager + // + ///////////////////////////////////////////////////////////////////// + + def sync = store.sync; + def verifyChecksums = store.verifyChecksums + + var log:RecordLog = _ + + var index:RichDB = _ + var indexOptions:Options = _ + + var 
lastIndexSnapshotPos:Long = _ + val snapshotRwLock = new ReentrantReadWriteLock(true) + + var factory:DBFactory = _ + val logRefs = HashMap[Long, LongCounter]() + + val collectionMeta = HashMap[Long, CollectionMeta]() + + def dirtyIndexFile = directory / ("dirty"+INDEX_SUFFIX) + def tempIndexFile = directory / ("temp"+INDEX_SUFFIX) + def snapshotIndexFile(id:Long) = create_sequence_file(directory,id, INDEX_SUFFIX) + + def createLog: RecordLog = { + new RecordLog(logDirectory, LOG_SUFFIX) + } + + var writeExecutor:ExecutorService = _ + + def storeTrace(ascii:String, force:Boolean=false) = { + val time = new SimpleDateFormat("dd/MMM/yyyy:HH:mm::ss Z").format(new Date) + log.appender { appender => + appender.append(LOG_TRACE, new AsciiBuffer("%s: %s".format(time, ascii))) + if( force ) { + appender.force + } + } + } + + def retry[T](func : =>T):T = RetrySupport.retry(LevelDBClient, store.isStarted, func _) + + def start() = { + + // Lets check store compatibility... + directory.mkdirs() + val version_file = directory / "store-version.txt" + if (version_file.exists()) { + val ver = try { + var tmp: String = version_file.readText().trim() + if (tmp.startsWith(STORE_SCHEMA_PREFIX)) { + tmp.stripPrefix(STORE_SCHEMA_PREFIX).toInt + } else { + -1 + } + } catch { + case e => throw new Exception("Unexpected version file format: " + version_file) + } + ver match { + case STORE_SCHEMA_VERSION => // All is good. + case _ => throw new Exception("Cannot open the store. 
It's schema version is not supported.") + } + } + version_file.writeText(STORE_SCHEMA_PREFIX + STORE_SCHEMA_VERSION) + + writeExecutor = Executors.newFixedThreadPool(1, new ThreadFactory() { + def newThread(r: Runnable) = { + val rc = new Thread(r, "LevelDB store io write") + rc.setDaemon(true) + rc + } + }) + + val factoryNames = store.indexFactory + factory = factoryNames.split("""(,|\s)+""").map(_.trim()).flatMap { name=> + try { + Some(this.getClass.getClassLoader.loadClass(name).newInstance().asInstanceOf[DBFactory]) + } catch { + case e:Throwable => + debug(e, "Could not load factory: "+name+" due to: "+e) + None + } + }.headOption.getOrElse(throw new Exception("Could not load any of the index factory classes: "+factoryNames)) + + if( factory.getClass.getName == "org.iq80.leveldb.impl.Iq80DBFactory") { + warn("Using the pure java LevelDB implementation which is still experimental. Production users should use the JNI based LevelDB implementation instead.") + } + + indexOptions = new Options(); + indexOptions.createIfMissing(true); + + indexOptions.maxOpenFiles(store.indexMaxOpenFiles) + indexOptions.blockRestartInterval(store.indexBlockRestartInterval) + indexOptions.paranoidChecks(store.paranoidChecks) + indexOptions.writeBufferSize(store.indexWriteBufferSize) + indexOptions.blockSize(store.indexBlockSize) + indexOptions.compressionType( store.indexCompression.toLowerCase match { + case "snappy" => CompressionType.SNAPPY + case "none" => CompressionType.NONE + case _ => CompressionType.SNAPPY + }) + + indexOptions.cacheSize(store.indexCacheSize) + indexOptions.logger(new Logger() { + val LOG = Log(factory.getClass.getName) + def log(msg: String) = LOG.debug("index: "+msg.stripSuffix("\n")) + }) + + log = createLog + log.logSize = store.logSize + log.on_log_rotate = ()=> { + // We snapshot the index every time we rotate the logs. + writeExecutor { + snapshotIndex(false) + } + } + + retry { + log.open + } + + // Find out what was the last snapshot. 
+ val snapshots = find_sequence_files(directory, INDEX_SUFFIX) + var lastSnapshotIndex = snapshots.lastOption + lastIndexSnapshotPos = lastSnapshotIndex.map(_._1).getOrElse(0) + + // Only keep the last snapshot.. + snapshots.filterNot(_._1 == lastIndexSnapshotPos).foreach( _._2.recursiveDelete ) + tempIndexFile.recursiveDelete + + retry { + + // Delete the dirty indexes + dirtyIndexFile.recursiveDelete + dirtyIndexFile.mkdirs() + + lastSnapshotIndex.foreach { case (id, file) => + // Resume log replay from a snapshot of the index.. + try { + file.listFiles.foreach { file => + file.linkTo(dirtyIndexFile / file.getName) + } + } catch { + case e:Exception => + warn(e, "Could not recover snapshot of the index: "+e) + lastSnapshotIndex = None + } + } + + index = new RichDB(factory.open(dirtyIndexFile, indexOptions)); + try { + loadCounters + index.put(DIRTY_INDEX_KEY, TRUE) + // Update the index /w what was stored on the logs.. + var pos = lastIndexSnapshotPos; + var last_reported_at = System.currentTimeMillis(); + var showing_progress = false + var last_reported_pos = 0L + try { + while (pos < log.appender_limit) { + val now = System.currentTimeMillis(); + if( now > last_reported_at+1000 ) { + val at = pos-lastIndexSnapshotPos + val total = log.appender_limit-lastIndexSnapshotPos + val rate = (pos-last_reported_pos)*1000.0 / (now - last_reported_at) + val eta = (total-at)/rate + val remaining = if(eta > 60*60) { + "%.2f hrs".format(eta/(60*60)) + } else if(eta > 60) { + "%.2f mins".format(eta/60) + } else { + "%.0f secs".format(eta) + } + + System.out.print("Replaying recovery log: %f%% done (%,d/%,d bytes) @ %,.2f kb/s, %s remaining. 
\r".format( + at*100.0/total, at, total, rate/1024, remaining)) + showing_progress = true; + last_reported_at = now + last_reported_pos = pos + } + + + log.read(pos).map { + case (kind, data, nextPos) => + kind match { + case LOG_ADD_COLLECTION => + val record= decodeCollectionRecord(data) + index.put(encodeLongKey(COLLECTION_PREFIX, record.getKey), data) + collectionMeta.put(record.getKey, new CollectionMeta) + + case LOG_REMOVE_COLLECTION => + val record = decodeCollectionKeyRecord(data) + // Delete the entries in the collection. + index.cursorPrefixed(encodeLongKey(ENTRY_PREFIX, record.getKey), new ReadOptions) { (key, value)=> + val record = decodeEntryRecord(value) + val pos = if ( record.hasValueLocation ) { + Some(record.getValueLocation) + } else { + None + } + pos.foreach(logRefDecrement(_)) + index.delete(key) + true + } + index.delete(data) + collectionMeta.remove(record.getKey) + + case LOG_ADD_ENTRY => + val record = decodeEntryRecord(data) + + val index_record = new EntryRecord.Bean() + index_record.setValueLocation(record.getValueLocation) + index_record.setValueLength(record.getValueLength) + val index_value = encodeEntryRecord(index_record.freeze()).toByteArray + + index.put(encodeEntryKey(ENTRY_PREFIX, record.getCollectionKey, record.getEntryKey), index_value) + + if ( record.hasValueLocation ) { + logRefIncrement(record.getValueLocation) + } + collectionIncrementSize(record.getCollectionKey, record.getEntryKey.toByteArray) + + case LOG_REMOVE_ENTRY => + val record = decodeEntryRecord(data) + + // Figure out which log file this message reference is pointing at.. + if ( record.hasValueLocation ) { + logRefDecrement(record.getValueLocation) + } + + index.delete(encodeEntryKey(ENTRY_PREFIX, record.getCollectionKey, record.getEntryKey)) + collectionDecrementSize( record.getCollectionKey) + + case _ => // Skip other records, they don't modify the index. 
+ + } + pos = nextPos + } + } + } + catch { + case e:Throwable => e.printStackTrace() + } + if(showing_progress) { + System.out.print(" \r"); + } + + } catch { + case e:Throwable => + // replay failed.. good thing we are in a retry block... + index.close + throw e; + } + } + } + + private def logRefDecrement(pos: Long) { + log.log_info(pos).foreach { logInfo => + logRefs.get(logInfo.position).foreach { counter => + if (counter.decrementAndGet() == 0) { + logRefs.remove(logInfo.position) + } + } + } + } + + private def logRefIncrement(pos: Long) { + log.log_info(pos).foreach { logInfo => + logRefs.getOrElseUpdate(logInfo.position, new LongCounter()).incrementAndGet() + } + } + + private def collectionDecrementSize(key: Long) { + collectionMeta.get(key).foreach(_.size -= 1) + } + private def collectionIncrementSize(key: Long, last_key:Array[Byte]) { + collectionMeta.get(key).foreach{ x=> + x.size += 1 + x.last_key = last_key + } + } + + private def storeCounters = { + def storeMap(key:Array[Byte], map:HashMap[Long, _ <: AnyRef]) { + val baos = new ByteArrayOutputStream() + val os = new ObjectOutputStream(baos); + os.writeInt(map.size); + map.foreach { + case (k, v) => + os.writeLong(k) + os.writeObject(v) + } + os.close() + index.put(key, baos.toByteArray) + } + storeMap(LOG_REF_INDEX_KEY, logRefs) + storeMap(COLLECTION_META_KEY, collectionMeta) + } + + private def loadCounters = { + def loadMap[T <: AnyRef](key:Array[Byte], map:HashMap[Long, T]) { + map.clear() + index.get(key, new ReadOptions).foreach { value=> + val bais = new ByteArrayInputStream(value) + val is = new ObjectInputStream(bais); + var remaining = is.readInt() + while(remaining > 0 ) { + map.put(is.readLong(), is.readObject().asInstanceOf[T]) + remaining-=1 + } + } + } + loadMap(LOG_REF_INDEX_KEY, logRefs) + loadMap(COLLECTION_META_KEY, collectionMeta) + } + + def stop() = { + if( writeExecutor!=null ) { + writeExecutor.shutdown + writeExecutor.awaitTermination(60, TimeUnit.SECONDS) + writeExecutor = 
null + + // this blocks until all io completes.. + // Suspend also deletes the index. + suspend() + + if (log != null) { + log.close + } + copyDirtyIndexToSnapshot + log = null + } + } + + def usingIndex[T](func: =>T):T = { + val lock = snapshotRwLock.readLock(); + lock.lock() + try { + func + } finally { + lock.unlock() + } + } + + def retryUsingIndex[T](func: =>T):T = retry(usingIndex( func )) + + /** + * TODO: expose this via management APIs, handy if you want to + * do a file system level snapshot and want the data to be consistent. + */ + def suspend() = { + // Make sure we are the only ones accessing the index. since + // we will be closing it to create a consistent snapshot. + snapshotRwLock.writeLock().lock() + + // Close the index so that it's files are not changed async on us. + storeCounters + index.put(DIRTY_INDEX_KEY, FALSE, new WriteOptions().sync(true)) + index.close + } + + /** + * TODO: expose this via management APIs, handy if you want to + * do a file system level snapshot and want the data to be consistent. + */ + def resume() = { + // re=open it.. + retry { + index = new RichDB(factory.open(dirtyIndexFile, indexOptions)); + index.put(DIRTY_INDEX_KEY, TRUE) + } + snapshotRwLock.writeLock().unlock() + } + + def copyDirtyIndexToSnapshot { + if( log.appender_limit == lastIndexSnapshotPos ) { + // no need to snapshot again... + return + } + + // Where we start copying files into. Delete this on + // restart. + val tmpDir = tempIndexFile + tmpDir.mkdirs() + + try { + + // Hard link all the index files. + dirtyIndexFile.listFiles.foreach { file => + file.linkTo(tmpDir / file.getName) + } + + // Rename to signal that the snapshot is complete. 
+ val newSnapshotIndexPos = log.appender_limit + tmpDir.renameTo(snapshotIndexFile(newSnapshotIndexPos)) + snapshotIndexFile(lastIndexSnapshotPos).recursiveDelete + lastIndexSnapshotPos = newSnapshotIndexPos + + } catch { + case e: Exception => + // if we could not snapshot for any reason, delete it as we don't + // want a partial check point.. + warn(e, "Could not snapshot the index: " + e) + tmpDir.recursiveDelete + } + } + + def snapshotIndex(sync:Boolean=false):Unit = { + suspend() + try { + if( sync ) { + log.current_appender.force + } + if( log.appender_limit == lastIndexSnapshotPos ) { + // no need to snapshot again... + return + } + copyDirtyIndexToSnapshot + } finally { + resume() + } + } + + def purge() = { + suspend() + try{ + log.close + locked_purge + } finally { + retry { + log.open + } + resume() + } + } + + def locked_purge { + logDirectory.listFiles.foreach {x => + if (x.getName.endsWith(".log")) { + x.delete() + } + } + directory.listFiles.foreach {x => + if (x.getName.endsWith(".index")) { + x.recursiveDelete + } + } + } + + def addCollection(record: CollectionRecord.Buffer) = { + val key = encodeLongKey(COLLECTION_PREFIX, record.getKey) + val value = record.toUnframedBuffer + retryUsingIndex { + log.appender { appender => + appender.append(LOG_ADD_COLLECTION, value) + index.put(key, value.toByteArray) + } + } + collectionMeta.put(record.getKey, new CollectionMeta) + } + + def getLogAppendPosition = log.appender_limit + + def listCollections: Seq[(Long, CollectionRecord.Buffer)] = { + val rc = ListBuffer[(Long, CollectionRecord.Buffer)]() + retryUsingIndex { + val ro = new ReadOptions + ro.verifyChecksums(verifyChecksums) + ro.fillCache(false) + index.cursorPrefixed(COLLECTION_PREFIX_ARRAY, ro) { (key, value) => + rc.append(( decodeLongKey(key)._2, CollectionRecord.FACTORY.parseUnframed(value) )) + true // to continue cursoring. 
+ } + } + rc + } + + def removeCollection(collectionKey: Long) = { + val key = encodeLongKey(COLLECTION_PREFIX, collectionKey) + val value = encodeVLong(collectionKey) + val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) + collectionMeta.remove(collectionKey) + retryUsingIndex { + log.appender { appender => + appender.append(LOG_REMOVE_COLLECTION, new Buffer(value)) + } + + val ro = new ReadOptions + ro.fillCache(false) + ro.verifyChecksums(verifyChecksums) + index.cursorPrefixed(entryKeyPrefix, ro) { (key, value)=> + val record = decodeEntryRecord(value) + val pos = if ( record.hasValueLocation ) { + Some(record.getValueLocation) + } else { + None + } + pos.foreach(logRefDecrement(_)) + index.delete(key) + true + } + index.delete(key) + } + } + + def collectionEmpty(collectionKey: Long) = { + val key = encodeLongKey(COLLECTION_PREFIX, collectionKey) + val value = encodeVLong(collectionKey) + val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) + + val meta = collectionMeta.getOrElseUpdate(collectionKey, new CollectionMeta) + meta.size = 0 + meta.last_key = null + + retryUsingIndex { + index.get(key).foreach { collectionData => + log.appender { appender => + appender.append(LOG_REMOVE_COLLECTION, new Buffer(value)) + appender.append(LOG_ADD_COLLECTION, new Buffer(collectionData)) + } + + val ro = new ReadOptions + ro.fillCache(false) + ro.verifyChecksums(verifyChecksums) + index.cursorPrefixed(entryKeyPrefix, ro) { (key, value)=> + val record = decodeEntryRecord(value) + val pos = if ( record.hasValueLocation ) { + Some(record.getValueLocation) + } else { + None + } + pos.foreach(logRefDecrement(_)) + index.delete(key) + true + } + } + } + } + + def queueCursor(collectionKey: Long, seq:Long)(func: (Message)=>Boolean) = { + collectionCursor(collectionKey, encodeLong(seq)) { (key, value) => + val seq = decodeLong(key) + var locator = (value.getValueLocation, value.getValueLength) + val msg = getMessage(locator) + 
msg.getMessageId().setEntryLocator((collectionKey, seq)) + msg.getMessageId().setDataLocator(locator) + func(msg) + } + } + + def getAckPosition(subKey: Long): Long = { + retryUsingIndex { + index.get(encodeEntryKey(ENTRY_PREFIX, subKey, ACK_POSITION)).map{ value=> + val record = decodeEntryRecord(value) + record.getValueLocation() + }.getOrElse(0L) + } + } + + def getMessage(locator:AnyRef):Message = { + assert(locator!=null) + val buffer = locator match { + case x:MessageRecord => + // Encoded form is still in memory.. + Some(x.data) + case (pos:Long, len:Int) => + // Load the encoded form from disk. + log.read(pos, len).map(new Buffer(_)) + } + + // Lets decode + buffer.map{ x => + var data = if( store.snappyCompressLogs ) { + Snappy.uncompress(x) + } else { + x + } + store.wireFormat.unmarshal(new ByteSequence(data.data, data.offset, data.length)).asInstanceOf[Message] + }.getOrElse(null) + } + + + def collectionCursor(collectionKey: Long, cursorPosition:Buffer)(func: (Buffer, EntryRecord.Buffer)=>Boolean) = { + val ro = new ReadOptions + ro.fillCache(true) + ro.verifyChecksums(verifyChecksums) + val start = encodeEntryKey(ENTRY_PREFIX, collectionKey, cursorPosition) + val end = encodeLongKey(ENTRY_PREFIX, collectionKey+1) + retryUsingIndex { + index.cursorRange(start, end, ro) { case (key, value) => + func(key.buffer.moveHead(9), EntryRecord.FACTORY.parseUnframed(value)) + } + } + } + + def collectionSize(collectionKey: Long) = { + collectionMeta.get(collectionKey).map(_.size).getOrElse(0L) + } + + def collectionIsEmpty(collectionKey: Long) = { + val entryKeyPrefix = encodeLongKey(ENTRY_PREFIX, collectionKey) + var empty = true + retryUsingIndex { + val ro = new ReadOptions + ro.fillCache(false) + ro.verifyChecksums(verifyChecksums) + index.cursorKeysPrefixed(entryKeyPrefix, ro) { key => + empty = false + false + } + } + empty + } + + val max_write_message_latency = TimeMetric() + val max_write_enqueue_latency = TimeMetric() + + val max_index_write_latency = 
TimeMetric() + + def store(uows: Array[DelayableUOW]) { + retryUsingIndex { + log.appender { appender => + + var syncNeeded = false + index.write(new WriteOptions, max_index_write_latency) { batch => + + var write_message_total = 0L + var write_enqueue_total = 0L + + uows.foreach { uow => + + + uow.actions.foreach { case (msg, action) => + val messageRecord = action.messageRecord + var log_info:LogInfo = null + var pos = -1L + var dataLocator:(Long, Int) = null + + if (messageRecord != null && messageRecord.locator==null) { + val start = System.nanoTime() + val p = appender.append(LOG_DATA, messageRecord.data) + pos = p._1 + log_info = p._2 + dataLocator = (pos, messageRecord.data.length) + messageRecord.locator = dataLocator + write_message_total += System.nanoTime() - start + } + + + action.dequeues.foreach { entry => + val keyLocation = entry.id.getEntryLocator.asInstanceOf[(Long, Long)] + val key = encodeEntryKey(ENTRY_PREFIX, keyLocation._1, keyLocation._2) + + if( dataLocator==null ) { + dataLocator = entry.id.getDataLocator match { + case x:(Long, Int) => x + case x:MessageRecord => x.locator + case _ => throw new RuntimeException("Unexpected locator type") + } + } + + val log_record = new EntryRecord.Bean() + log_record.setCollectionKey(entry.queueKey) + log_record.setEntryKey(new Buffer(key, 9, 8)) + log_record.setValueLocation(dataLocator._1) + appender.append(LOG_REMOVE_ENTRY, encodeEntryRecord(log_record.freeze())) + + batch.delete(key) + logRefDecrement(dataLocator._1) + collectionDecrementSize(entry.queueKey) + } + + action.enqueues.foreach { entry => + + if(dataLocator ==null ) { + dataLocator = entry.id.getDataLocator match { + case x:(Long, Int) => x + case x:MessageRecord => x.locator + case _ => + throw new RuntimeException("Unexpected locator type") + } + } + + val start = System.nanoTime() + + val key = encodeEntryKey(ENTRY_PREFIX, entry.queueKey, entry.queueSeq) + + assert(entry.id.getDataLocator()!=null) + + val log_record = new 
EntryRecord.Bean() + log_record.setCollectionKey(entry.queueKey) + log_record.setEntryKey(new Buffer(key, 9, 8)) + log_record.setValueLocation(dataLocator._1) + log_record.setValueLength(dataLocator._2) + appender.append(LOG_ADD_ENTRY, encodeEntryRecord(log_record.freeze())) + + val index_record = new EntryRecord.Bean() + index_record.setValueLocation(dataLocator._1) + index_record.setValueLength(dataLocator._2) + batch.put(key, encodeEntryRecord(index_record.freeze()).toByteArray) + + val log_data = encodeEntryRecord(log_record.freeze()) + val index_data = encodeEntryRecord(index_record.freeze()).toByteArray + + appender.append(LOG_ADD_ENTRY, log_data) + batch.put(key, index_data) + + Option(log_info).orElse(log.log_info(dataLocator._1)).foreach { logInfo => + logRefs.getOrElseUpdate(logInfo.position, new LongCounter()).incrementAndGet() + } + + collectionIncrementSize(entry.queueKey, log_record.getEntryKey.toByteArray) + write_enqueue_total += System.nanoTime() - start + } + + } + uow.subAcks.foreach { entry => + val key = encodeEntryKey(ENTRY_PREFIX, entry.subKey, ACK_POSITION) + val log_record = new EntryRecord.Bean() + log_record.setCollectionKey(entry.subKey) + log_record.setEntryKey(ACK_POSITION) + log_record.setValueLocation(entry.ackPosition) + appender.append(LOG_ADD_ENTRY, encodeEntryRecord(log_record.freeze())) + + val index_record = new EntryRecord.Bean() + index_record.setValueLocation(entry.ackPosition) + batch.put(key, encodeEntryRecord(log_record.freeze()).toByteArray) + } + + if( !syncNeeded && uow.syncNeeded ) { + syncNeeded = true + } + } + + max_write_message_latency.add(write_message_total) + max_write_enqueue_latency.add(write_enqueue_total) + } + if( syncNeeded && sync ) { + appender.force + } + } // end of log.appender { block } + + // now that data is logged.. 
locate message from the data in the logs + uows.foreach { uow => + uow.actions.foreach { case (msg, action) => + val messageRecord = action.messageRecord + if (messageRecord != null) { + messageRecord.id.setDataLocator(messageRecord.locator) + } + } + } + } + } + + def getCollectionEntries(collectionKey: Long, firstSeq:Long, lastSeq:Long): Seq[(Buffer, EntryRecord.Buffer)] = { + var rc = ListBuffer[(Buffer, EntryRecord.Buffer)]() + val ro = new ReadOptions + ro.verifyChecksums(verifyChecksums) + ro.fillCache(true) + retryUsingIndex { + index.snapshot { snapshot => + ro.snapshot(snapshot) + val start = encodeEntryKey(ENTRY_PREFIX, collectionKey, firstSeq) + val end = encodeEntryKey(ENTRY_PREFIX, collectionKey, lastSeq+1) + index.cursorRange( start, end, ro ) { (key, value) => + val (_, _, seq) = decodeEntryKey(key) + rc.append((seq, EntryRecord.FACTORY.parseUnframed(value))) + true + } + } + } + rc + } + + def getLastQueueEntrySeq(collectionKey: Long): Long = { + getLastCollectionEntryKey(collectionKey).map(_.bigEndianEditor().readLong()).getOrElse(0L) + } + + def getLastCollectionEntryKey(collectionKey: Long): Option[Buffer] = { + collectionMeta.get(collectionKey).flatMap(x=> Option(x.last_key)).map(new Buffer(_)) + } + + def gc(topicPositions:Seq[(Long, Long)]):Unit = { + + // Delete message refs for topics who's consumers have advanced.. 
+ if( !topicPositions.isEmpty ) { + retryUsingIndex { + index.write(new WriteOptions, max_index_write_latency) { batch => + for( (topic, first) <- topicPositions ) { + val ro = new ReadOptions + ro.fillCache(true) + ro.verifyChecksums(verifyChecksums) + val start = encodeEntryKey(ENTRY_PREFIX, topic, 0) + val end = encodeEntryKey(ENTRY_PREFIX, topic, first) + index.cursorRange(start, end, ro) { case (key, value) => + val entry = EntryRecord.FACTORY.parseUnframed(value) + batch.delete(key) + logRefDecrement(entry.getValueLocation) + true + } + } + } + } + } + + import collection.JavaConversions._ + lastIndexSnapshotPos + val emptyJournals = log.log_infos.keySet.toSet -- logRefs.keySet + + // We don't want to delete any journals that the index has not snapshot'ed or + // the the + val deleteLimit = log.log_info(lastIndexSnapshotPos).map(_.position). + getOrElse(lastIndexSnapshotPos).min(log.appender_start) + + emptyJournals.foreach { id => + if ( id < deleteLimit ) { + log.delete(id) + } + } + } + +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala new file mode 100644 index 0000000000..50473f4454 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStore.scala @@ -0,0 +1,622 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb + +import org.apache.activemq.broker.BrokerService +import org.apache.activemq.broker.BrokerServiceAware +import org.apache.activemq.broker.ConnectionContext +import org.apache.activemq.command._ +import org.apache.activemq.openwire.OpenWireFormat +import org.apache.activemq.usage.SystemUsage +import java.io.File +import java.io.IOException +import java.util.concurrent.ExecutionException +import java.util.concurrent.Future +import java.util.concurrent.atomic.AtomicLong +import reflect.BeanProperty +import org.apache.activemq.store._ +import java.util._ +import scala.collection.mutable.ListBuffer +import javax.management.ObjectName +import org.apache.activemq.broker.jmx.AnnotatedMBean +import org.apache.activemq.util._ +import org.apache.kahadb.util.LockFile +import org.apache.activemq.leveldb.util.{RetrySupport, FileSupport, Log} + +object LevelDBStore extends Log { + + val DONE = new CountDownFuture(); + DONE.countDown + + def toIOException(e: Throwable): IOException = { + if (e.isInstanceOf[ExecutionException]) { + var cause: Throwable = (e.asInstanceOf[ExecutionException]).getCause + if (cause.isInstanceOf[IOException]) { + return cause.asInstanceOf[IOException] + } + } + if (e.isInstanceOf[IOException]) { + return e.asInstanceOf[IOException] + } + return IOExceptionSupport.create(e) + } + + def waitOn(future: Future[AnyRef]): Unit = { + try { + future.get + } + catch { + case e: Throwable => { + throw toIOException(e) + } + } + } +} + +case class DurableSubscription(subKey:Long, topicKey:Long, info: 
SubscriptionInfo) { + var lastAckPosition = 0L + var cursorPosition = 0L +} + +class LevelDBStoreView(val store:LevelDBStore) extends LevelDBStoreViewMBean { + import store._ + + def getAsyncBufferSize = asyncBufferSize + def getIndexDirectory = directory.getCanonicalPath + def getLogDirectory = Option(logDirectory).getOrElse(directory).getCanonicalPath + def getIndexBlockRestartInterval = indexBlockRestartInterval + def getIndexBlockSize = indexBlockSize + def getIndexCacheSize = indexCacheSize + def getIndexCompression = indexCompression + def getIndexFactory = db.client.factory.getClass.getName + def getIndexMaxOpenFiles = indexMaxOpenFiles + def getIndexWriteBufferSize = indexWriteBufferSize + def getLogSize = logSize + def getParanoidChecks = paranoidChecks + def getSync = sync + def getVerifyChecksums = verifyChecksums + + def getUowClosedCounter = db.uowClosedCounter + def getUowCanceledCounter = db.uowCanceledCounter + def getUowStoringCounter = db.uowStoringCounter + def getUowStoredCounter = db.uowStoredCounter + + def getUowMaxCompleteLatency = db.uow_complete_latency.get + def getMaxIndexWriteLatency = db.client.max_index_write_latency.get + def getMaxLogWriteLatency = db.client.log.max_log_write_latency.get + def getMaxLogFlushLatency = db.client.log.max_log_flush_latency.get + def getMaxLogRotateLatency = db.client.log.max_log_rotate_latency.get + + def resetUowMaxCompleteLatency = db.uow_complete_latency.reset + def resetMaxIndexWriteLatency = db.client.max_index_write_latency.reset + def resetMaxLogWriteLatency = db.client.log.max_log_write_latency.reset + def resetMaxLogFlushLatency = db.client.log.max_log_flush_latency.reset + def resetMaxLogRotateLatency = db.client.log.max_log_rotate_latency.reset + + def getIndexStats = db.client.index.getProperty("leveldb.stats") +} + +import LevelDBStore._ + +class LevelDBStore extends ServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore { + + final val wireFormat = new 
OpenWireFormat + final val db = new DBManager(this) + + @BeanProperty + var directory: File = null + @BeanProperty + var logDirectory: File = null + + @BeanProperty + var logSize: Long = 1024 * 1024 * 100 + @BeanProperty + var indexFactory: String = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory" + @BeanProperty + var sync: Boolean = true + @BeanProperty + var verifyChecksums: Boolean = false + @BeanProperty + var indexMaxOpenFiles: Int = 1000 + @BeanProperty + var indexBlockRestartInterval: Int = 16 + @BeanProperty + var paranoidChecks: Boolean = false + @BeanProperty + var indexWriteBufferSize: Int = 1024*1024*6 + @BeanProperty + var indexBlockSize: Int = 4 * 1024 + @BeanProperty + var indexCompression: String = "snappy" + @BeanProperty + var logCompression: String = "none" + @BeanProperty + var indexCacheSize: Long = 1024 * 1024 * 256L + @BeanProperty + var flushDelay = 1000*5 + @BeanProperty + var asyncBufferSize = 1024*1024*4 + @BeanProperty + var monitorStats = false + @BeanProperty + var failIfLocked = false + + var purgeOnStatup: Boolean = false + var brokerService: BrokerService = null + + val queues = collection.mutable.HashMap[ActiveMQQueue, LevelDBStore#LevelDBMessageStore]() + val topics = collection.mutable.HashMap[ActiveMQTopic, LevelDBStore#LevelDBTopicMessageStore]() + val topicsById = collection.mutable.HashMap[Long, LevelDBStore#LevelDBTopicMessageStore]() + + override def toString: String = { + return "LevelDB:[" + directory.getAbsolutePath + "]" + } + + def objectName = { + var brokerON = brokerService.getBrokerObjectName + val broker_name = brokerON.getKeyPropertyList().get("BrokerName") + new ObjectName(brokerON.getDomain() + ":" + + "BrokerName="+JMXSupport.encodeObjectNamePart(broker_name)+ "," + + "Type=LevelDBStore"); + } + + def retry[T](func : =>T):T = RetrySupport.retry(LevelDBStore, isStarted, func _) + + var lock_file: LockFile = _ + + var snappyCompressLogs = false + + def doStart: Unit = { + import 
FileSupport._ + + snappyCompressLogs = logCompression.toLowerCase == "snappy" && Snappy != null + debug("starting") + if ( lock_file==null ) { + lock_file = new LockFile(directory / "lock", true) + } + + // Expose a JMX bean to expose the status of the store. + if(brokerService!=null){ + try { + AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreView(this), objectName) + } catch { + case e: Throwable => { + warn(e, "LevelDB Store could not be registered in JMX: " + e.getMessage) + } + } + } + + if (failIfLocked) { + lock_file.lock() + } else { + retry { + lock_file.lock() + } + } + + if (purgeOnStatup) { + purgeOnStatup = false + db.client.locked_purge + info("Purged: "+this) + } + + db.start + db.loadCollections + debug("started") + } + + def doStop(stopper: ServiceStopper): Unit = { + db.stop + lock_file.unlock() + if(brokerService!=null){ + brokerService.getManagementContext().unregisterMBean(objectName); + } + info("Stopped "+this) + } + + def setBrokerService(brokerService: BrokerService): Unit = { + this.brokerService = brokerService + } + + def setBrokerName(brokerName: String): Unit = { + } + + def setUsageManager(usageManager: SystemUsage): Unit = { + } + + def deleteAllMessages: Unit = { + purgeOnStatup = true + } + + def getLastMessageBrokerSequenceId: Long = { + return 0 + } + + def createTransactionStore = this + + val transactions = collection.mutable.HashMap[TransactionId, Transaction]() + + trait TransactionAction { + def apply(uow:DelayableUOW):Unit + } + + case class Transaction(id:TransactionId) { + val commitActions = ListBuffer[TransactionAction]() + def add(store:LevelDBMessageStore, message: Message, delay:Boolean) = { + commitActions += new TransactionAction() { + def apply(uow:DelayableUOW) = { + store.doAdd(uow, message, delay) + } + } + } + def remove(store:LevelDBMessageStore, msgid:MessageId) = { + commitActions += new TransactionAction() { + def apply(uow:DelayableUOW) = { + store.doRemove(uow, msgid) + } 
+ } + } + def updateAckPosition(store:LevelDBTopicMessageStore, sub: DurableSubscription, position: Long) = { + commitActions += new TransactionAction() { + def apply(uow:DelayableUOW) = { + store.doUpdateAckPosition(uow, sub, position) + } + } + } + } + + def transaction(txid: TransactionId) = transactions.getOrElseUpdate(txid, Transaction(txid)) + + def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) = { + preCommit.run() + transactions.remove(txid) match { + case None=> + println("The transaction does not exist") + postCommit.run() + case Some(tx)=> + withUow { uow => + for( action <- tx.commitActions ) { + action(uow) + } + uow.addCompleteListener( postCommit.run() ) + } + } + } + + def rollback(txid: TransactionId) = { + transactions.remove(txid) match { + case None=> + println("The transaction does not exist") + case Some(tx)=> + } + } + + def prepare(tx: TransactionId) = { + sys.error("XA transactions not yet supported.") + } + def recover(listener: TransactionRecoveryListener) = { + } + + def createQueueMessageStore(destination: ActiveMQQueue) = { + this.synchronized(queues.get(destination)).getOrElse(db.createQueueStore(destination)) + } + + def createQueueMessageStore(destination: ActiveMQQueue, key: Long):LevelDBMessageStore = { + var rc = new LevelDBMessageStore(destination, key) + this.synchronized { + queues.put(destination, rc) + } + rc + } + + def removeQueueMessageStore(destination: ActiveMQQueue): Unit = this synchronized { + queues.remove(destination).foreach { store=> + db.destroyQueueStore(store.key) + } + } + + def createTopicMessageStore(destination: ActiveMQTopic): TopicMessageStore = { + this.synchronized(topics.get(destination)).getOrElse(db.createTopicStore(destination)) + } + + def createTopicMessageStore(destination: ActiveMQTopic, key: Long):LevelDBTopicMessageStore = { + var rc = new LevelDBTopicMessageStore(destination, key) + this synchronized { + topics.put(destination, rc) + 
topicsById.put(key, rc) + } + rc + } + + def removeTopicMessageStore(destination: ActiveMQTopic): Unit = { + topics.remove(destination).foreach { store=> + store.subscriptions.values.foreach { sub => + db.removeSubscription(sub) + } + store.subscriptions.clear() + db.destroyQueueStore(store.key) + } + } + + def getLogAppendPosition = db.getLogAppendPosition + + def getDestinations: Set[ActiveMQDestination] = { + import collection.JavaConversions._ + var rc: HashSet[ActiveMQDestination] = new HashSet[ActiveMQDestination] + rc.addAll(topics.keys) + rc.addAll(queues.keys) + return rc + } + + def getLastProducerSequenceId(id: ProducerId): Long = { + return -1 + } + + def size: Long = { + return 0 + } + + def checkpoint(sync: Boolean): Unit = db.checkpoint(sync) + + def withUow[T](func:(DelayableUOW)=>T):T = { + val uow = db.createUow + try { + func(uow) + } finally { + uow.release() + } + } + + private def subscriptionKey(clientId: String, subscriptionName: String): String = { + return clientId + ":" + subscriptionName + } + + case class LevelDBMessageStore(dest: ActiveMQDestination, val key: Long) extends AbstractMessageStore(dest) { + + protected val lastSeq: AtomicLong = new AtomicLong(0) + protected var cursorPosition: Long = 0 + + lastSeq.set(db.getLastQueueEntrySeq(key)) + + def doAdd(uow: DelayableUOW, message: Message, delay:Boolean): CountDownFuture = { + uow.enqueue(key, lastSeq.incrementAndGet, message, delay) + } + + + override def asyncAddQueueMessage(context: ConnectionContext, message: Message) = asyncAddQueueMessage(context, message, false) + override def asyncAddQueueMessage(context: ConnectionContext, message: Message, delay: Boolean): Future[AnyRef] = { + if( message.getTransactionId!=null ) { + transaction(message.getTransactionId).add(this, message, delay) + DONE + } else { + withUow { uow=> + doAdd(uow, message, delay) + } + } + } + + override def addMessage(context: ConnectionContext, message: Message) = addMessage(context, message, false) + 
override def addMessage(context: ConnectionContext, message: Message, delay: Boolean): Unit = { + waitOn(asyncAddQueueMessage(context, message, delay)) + } + + def doRemove(uow: DelayableUOW, id: MessageId): CountDownFuture = { + uow.dequeue(key, id) + } + + override def removeAsyncMessage(context: ConnectionContext, ack: MessageAck): Unit = { + if( ack.getTransactionId!=null ) { + transaction(ack.getTransactionId).remove(this, ack.getLastMessageId) + DONE + } else { + waitOn(withUow{uow=> + doRemove(uow, ack.getLastMessageId) + }) + } + } + + def removeMessage(context: ConnectionContext, ack: MessageAck): Unit = { + removeAsyncMessage(context, ack) + } + + def getMessage(id: MessageId): Message = { + var message: Message = db.getMessage(id) + if (message == null) { + throw new IOException("Message id not found: " + id) + } + return message + } + + def removeAllMessages(context: ConnectionContext): Unit = { + db.collectionEmpty(key) + cursorPosition = 0 + } + + def getMessageCount: Int = { + return db.collectionSize(key).toInt + } + + override def isEmpty: Boolean = { + return db.collectionIsEmpty(key) + } + + def recover(listener: MessageRecoveryListener): Unit = { + cursorPosition = db.cursorMessages(key, listener, 0) + } + + def resetBatching: Unit = { + cursorPosition = 0 + } + + def recoverNextMessages(maxReturned: Int, listener: MessageRecoveryListener): Unit = { + cursorPosition = db.cursorMessages(key, LimitingRecoveryListener(maxReturned, listener), cursorPosition) + } + + override def setBatch(id: MessageId): Unit = { + cursorPosition = db.queuePosition(id) + } + + } + + case class LimitingRecoveryListener(max: Int, listener: MessageRecoveryListener) extends MessageRecoveryListener { + private var recovered: Int = 0 + def hasSpace = recovered < max && listener.hasSpace + def recoverMessage(message: Message) = { + recovered += 1; + listener.recoverMessage(message) + } + def recoverMessageReference(ref: MessageId) = { + recovered += 1; + 
listener.recoverMessageReference(ref) + } + def isDuplicate(ref: MessageId) = listener.isDuplicate(ref) + } + + + // + // This gts called when the store is first loading up, it restores + // the existing durable subs.. + def createSubscription(sub:DurableSubscription) = { + this.synchronized(topicsById.get(sub.topicKey)) match { + case Some(topic) => + topic.synchronized { + topic.subscriptions.put((sub.info.getClientId, sub.info.getSubcriptionName), sub) + } + case None => + // Topic does not exist.. so kill the durable sub.. + db.removeSubscription(sub) + } + } + + + def getTopicGCPositions = { + import collection.JavaConversions._ + val topics = this.synchronized { + new ArrayList(topicsById.values()) + } + topics.flatMap(_.gcPosition).toSeq + } + + class LevelDBTopicMessageStore(dest: ActiveMQDestination, key: Long) extends LevelDBMessageStore(dest, key) with TopicMessageStore { + val subscriptions = collection.mutable.HashMap[(String, String), DurableSubscription]() + var firstSeq = 0L + + def gcPosition:Option[(Long, Long)] = { + var pos = lastSeq.get() + subscriptions.synchronized { + subscriptions.values.foreach { sub => + if( sub.lastAckPosition < pos ) { + pos = sub.lastAckPosition + } + } + if( firstSeq != pos+1) { + firstSeq = pos+1 + Some(key, firstSeq) + } else { + None + } + } + } + + def addSubsciption(info: SubscriptionInfo, retroactive: Boolean) = { + var sub = db.addSubscription(key, info) + subscriptions.synchronized { + subscriptions.put((info.getClientId, info.getSubcriptionName), sub) + } + sub.lastAckPosition = if (retroactive) 0 else lastSeq.get() + waitOn(withUow{ uow=> + uow.updateAckPosition(sub) + uow.countDownFuture + }) + } + + def getAllSubscriptions: Array[SubscriptionInfo] = subscriptions.synchronized { + subscriptions.values.map(_.info).toArray + } + + def lookupSubscription(clientId: String, subscriptionName: String): SubscriptionInfo = subscriptions.synchronized { + subscriptions.get((clientId, 
subscriptionName)).map(_.info).getOrElse(null) + } + + def deleteSubscription(clientId: String, subscriptionName: String): Unit = { + subscriptions.synchronized { + subscriptions.remove((clientId, subscriptionName)) + }.foreach(db.removeSubscription(_)) + } + + private def lookup(clientId: String, subscriptionName: String): Option[DurableSubscription] = subscriptions.synchronized { + subscriptions.get((clientId, subscriptionName)) + } + + def doUpdateAckPosition(uow: DelayableUOW, sub: DurableSubscription, position: Long) = { + sub.lastAckPosition = position + uow.updateAckPosition(sub) + } + + def acknowledge(context: ConnectionContext, clientId: String, subscriptionName: String, messageId: MessageId, ack: MessageAck): Unit = { + lookup(clientId, subscriptionName).foreach { sub => + var position = db.queuePosition(messageId) + if( ack.getTransactionId!=null ) { + transaction(ack.getTransactionId).updateAckPosition(this, sub, position) + DONE + } else { + waitOn(withUow{ uow=> + doUpdateAckPosition(uow, sub, position) + uow.countDownFuture + }) + } + + } + } + + def resetBatching(clientId: String, subscriptionName: String): Unit = { + lookup(clientId, subscriptionName).foreach { sub => + sub.cursorPosition = 0 + } + } + def recoverSubscription(clientId: String, subscriptionName: String, listener: MessageRecoveryListener): Unit = { + lookup(clientId, subscriptionName).foreach { sub => + sub.cursorPosition = db.cursorMessages(key, listener, sub.cursorPosition.max(sub.lastAckPosition+1)) + } + } + + def recoverNextMessages(clientId: String, subscriptionName: String, maxReturned: Int, listener: MessageRecoveryListener): Unit = { + lookup(clientId, subscriptionName).foreach { sub => + sub.cursorPosition = db.cursorMessages(key, LimitingRecoveryListener(maxReturned, listener), sub.cursorPosition.max(sub.lastAckPosition+1)) + } + } + + def getMessageCount(clientId: String, subscriptionName: String): Int = { + lookup(clientId, subscriptionName) match { + case Some(sub) => 
(lastSeq.get - sub.lastAckPosition).toInt + case None => 0 + } + } + + } + + /////////////////////////////////////////////////////////////////////////// + // The following methods actually have nothing to do with JMS txs... It's more like + // operation batch.. we handle that in the DBManager tho.. + /////////////////////////////////////////////////////////////////////////// + def beginTransaction(context: ConnectionContext): Unit = {} + def commitTransaction(context: ConnectionContext): Unit = {} + def rollbackTransaction(context: ConnectionContext): Unit = {} + + def createClient = new LevelDBClient(this); +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java new file mode 100644 index 0000000000..ad90a1924b --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/LevelDBStoreViewMBean.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb; + +import org.apache.activemq.broker.jmx.MBeanInfo; + +import java.io.File; + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +public interface LevelDBStoreViewMBean { + + @MBeanInfo("The directory holding the store index data.") + String getIndexDirectory(); + + @MBeanInfo("The directory holding the store log data.") + String getLogDirectory(); + + @MBeanInfo("The size the log files are allowed to grow to.") + long getLogSize(); + + @MBeanInfo("The implementation of the LevelDB index being used.") + String getIndexFactory(); + + @MBeanInfo("Are writes synced to disk.") + boolean getSync(); + + @MBeanInfo("Is data verified against checksums as it's loaded back from disk.") + boolean getVerifyChecksums(); + + @MBeanInfo("The maximum number of open files the index will open at one time.") + int getIndexMaxOpenFiles(); + + @MBeanInfo("Number of keys between restart points for delta encoding of keys in the index") + int getIndexBlockRestartInterval(); + + @MBeanInfo("Do aggressive checking of store data") + boolean getParanoidChecks(); + + @MBeanInfo("Amount of data to build up in memory for the index before converting to a sorted on-disk file.") + int getIndexWriteBufferSize(); + + @MBeanInfo("Approximate size of user data packed per block for the index") + int getIndexBlockSize(); + + @MBeanInfo("The type of compression to use for the index") + String getIndexCompression(); + + @MBeanInfo("The size of the cache index") + long getIndexCacheSize(); + + @MBeanInfo("The maximum amount of async writes to buffer up") + int getAsyncBufferSize(); + + @MBeanInfo("The number of units of work which have been closed.") + long getUowClosedCounter(); + @MBeanInfo("The number of units of work which have been canceled.") + long getUowCanceledCounter(); + @MBeanInfo("The number of units of work which started getting stored.") + long getUowStoringCounter(); + @MBeanInfo("The number of units of work which completed getting stored") + long getUowStoredCounter(); + + @MBeanInfo("Gets and resets the maximum time (in ms) a unit of work took to complete.") + double 
resetUowMaxCompleteLatency(); + @MBeanInfo("Gets and resets the maximum time (in ms) an index write batch took to execute.") + double resetMaxIndexWriteLatency(); + @MBeanInfo("Gets and resets the maximum time (in ms) a log write took to execute (includes the index write latency).") + double resetMaxLogWriteLatency(); + @MBeanInfo("Gets and resets the maximum time (in ms) a log flush took to execute.") + double resetMaxLogFlushLatency(); + @MBeanInfo("Gets and resets the maximum time (in ms) a log rotation took to perform.") + double resetMaxLogRotateLatency(); + + @MBeanInfo("Gets the maximum time (in ms) a unit of work took to complete.") + double getUowMaxCompleteLatency(); + @MBeanInfo("Gets the maximum time (in ms) an index write batch took to execute.") + double getMaxIndexWriteLatency(); + @MBeanInfo("Gets the maximum time (in ms) a log write took to execute (includes the index write latency).") + double getMaxLogWriteLatency(); + @MBeanInfo("Gets the maximum time (in ms) a log flush took to execute.") + double getMaxLogFlushLatency(); + @MBeanInfo("Gets the maximum time (in ms) a log rotation took to perform.") + double getMaxLogRotateLatency(); + + @MBeanInfo("Gets the index statistics.") + String getIndexStats(); +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala new file mode 100644 index 0000000000..5d92caf98c --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/RecordLog.scala @@ -0,0 +1,518 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb + +import java.{lang=>jl} +import java.{util=>ju} + +import java.util.zip.CRC32 +import java.util.Map.Entry +import java.util.concurrent.atomic.AtomicLong +import java.io._ +import org.fusesource.hawtbuf.{DataByteArrayInputStream, DataByteArrayOutputStream, Buffer} +import org.fusesource.hawtdispatch.BaseRetained +import org.apache.activemq.leveldb.util.FileSupport._ +import org.apache.activemq.util.LRUCache +import util.TimeMetric._ +import util.{TimeMetric, Log} +import java.util.TreeMap + +object RecordLog extends Log { + + // The log files contain a sequence of variable length log records: + // record := header + data + // + // header := + // '*' : int8 // Start of Record Magic + // kind : int8 // Help identify content type of the data. 
+ // checksum : uint32 // crc32c of the data[] + // length : uint32 // the length the the data + + val LOG_HEADER_PREFIX = '*'.toByte + val UOW_END_RECORD = -1.toByte + + val LOG_HEADER_SIZE = 10 + + val BUFFER_SIZE = 1024*512 + val BYPASS_BUFFER_SIZE = 1024*16 + + case class LogInfo(file:File, position:Long, length:Long) { + def limit = position+length + } + + def encode_long(a1:Long) = { + val out = new DataByteArrayOutputStream(8) + out.writeLong(a1) + out.toBuffer + } + + def decode_long(value:Buffer):Long = { + val in = new DataByteArrayInputStream(value) + in.readLong() + } + +} + +case class RecordLog(directory: File, logSuffix:String) { + import RecordLog._ + + directory.mkdirs() + + var logSize = 1024 * 1024 * 100L + var current_appender:LogAppender = _ + var verify_checksums = false + var sync = false + + val log_infos = new TreeMap[Long, LogInfo]() + + object log_mutex + + def delete(id:Long) = { + log_mutex.synchronized { + // We can't delete the current appender. + if( current_appender.position != id ) { + Option(log_infos.get(id)).foreach { info => + onDelete(info.file) + log_infos.remove(id) + } + } + } + } + + protected def onDelete(file:File) = { + file.delete() + } + + def checksum(data: Buffer): Int = { + val checksum = new CRC32 + checksum.update(data.data, data.offset, data.length) + (checksum.getValue & 0xFFFFFFFF).toInt + } + + class LogAppender(file:File, position:Long) extends LogReader(file, position) { + + val info = new LogInfo(file, position, 0) + + override def open = new RandomAccessFile(file, "rw") + + override def dispose() = { + force + super.dispose() + } + + var append_offset = 0L + val flushed_offset = new AtomicLong(0) + + def append_position = { + position+append_offset + } + + // set the file size ahead of time so that we don't have to sync the file + // meta-data on every log sync. 
+ channel.position(logSize-1) + channel.write(new Buffer(1).toByteBuffer) + channel.force(true) + if( sync ) { + channel.position(0) + } + + val write_buffer = new DataByteArrayOutputStream(BUFFER_SIZE+LOG_HEADER_SIZE) + + def force = { + flush + if(sync) { + max_log_flush_latency { + // only need to update the file metadata if the file size changes.. + channel.force(append_offset > logSize) + } + } + } + + /** + * returns the offset position of the data record. + */ + def append(id:Byte, data: Buffer) = this.synchronized { + val record_position = append_position + val data_length = data.length + val total_length = LOG_HEADER_SIZE + data_length + + if( write_buffer.position() + total_length > BUFFER_SIZE ) { + flush + } + + val cs: Int = checksum(data) +// trace("Writing at: "+record_position+" len: "+data_length+" with checksum: "+cs) + + if( false && total_length > BYPASS_BUFFER_SIZE ) { + + // Write the header and flush.. + write_buffer.writeByte(LOG_HEADER_PREFIX) + write_buffer.writeByte(id) + write_buffer.writeInt(cs) + write_buffer.writeInt(data_length) + + append_offset += LOG_HEADER_SIZE + flush + + // Directly write the data to the channel since it's large. 
+ val buffer = data.toByteBuffer + val pos = append_offset+LOG_HEADER_SIZE + val remaining = buffer.remaining + channel.write(buffer, pos) + flushed_offset.addAndGet(remaining) + if( buffer.hasRemaining ) { + throw new IOException("Short write") + } + append_offset += data_length + + } else { + write_buffer.writeByte(LOG_HEADER_PREFIX) + write_buffer.writeByte(id) + write_buffer.writeInt(cs) + write_buffer.writeInt(data_length) + write_buffer.write(data.data, data.offset, data_length) + append_offset += total_length + } + (record_position, info) + } + + def flush = max_log_flush_latency { this.synchronized { + if( write_buffer.position() > 0 ) { + val buffer = write_buffer.toBuffer.toByteBuffer + val remaining = buffer.remaining + val pos = append_offset-remaining + channel.write(buffer, pos) + flushed_offset.addAndGet(remaining) + if( buffer.hasRemaining ) { + throw new IOException("Short write") + } + write_buffer.reset() + } } + } + + override def check_read_flush(end_offset:Long) = { + if( flushed_offset.get() < end_offset ) { + flush + } + } + + } + + case class LogReader(file:File, position:Long) extends BaseRetained { + + def open = new RandomAccessFile(file, "r") + + val fd = open + val channel = fd.getChannel + + override def dispose() { + fd.close() + } + + def check_read_flush(end_offset:Long) = {} + + def read(record_position:Long, length:Int) = { + val offset = record_position-position + assert(offset >=0 ) + + check_read_flush(offset+LOG_HEADER_SIZE+length) + + if(verify_checksums) { + + val record = new Buffer(LOG_HEADER_SIZE+length) + + def record_is_not_changing = { + using(open) { fd => + val channel = fd.getChannel + val new_record = new Buffer(LOG_HEADER_SIZE+length) + channel.read(new_record.toByteBuffer, offset) + var same = record == new_record + println(same) + same + } + } + + if( channel.read(record.toByteBuffer, offset) != record.length ) { + assert( record_is_not_changing ) + throw new IOException("short record at position: 
"+record_position+" in file: "+file+", offset: "+offset) + } + + val is = new DataByteArrayInputStream(record) + val prefix = is.readByte() + if( prefix != LOG_HEADER_PREFIX ) { + assert(record_is_not_changing) + throw new IOException("invalid record at position: "+record_position+" in file: "+file+", offset: "+offset) + } + + val id = is.readByte() + val expectedChecksum = is.readInt() + val expectedLength = is.readInt() + val data = is.readBuffer(length) + + // If your reading the whole record we can verify the data checksum + if( expectedLength == length ) { + if( expectedChecksum != checksum(data) ) { + assert(record_is_not_changing) + throw new IOException("checksum does not match at position: "+record_position+" in file: "+file+", offset: "+offset) + } + } + + data + } else { + val data = new Buffer(length) + if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != data.length ) { + throw new IOException("short record at position: "+record_position+" in file: "+file+", offset: "+offset) + } + data + } + } + + def read(record_position:Long) = { + val offset = record_position-position + val header = new Buffer(LOG_HEADER_SIZE) + channel.read(header.toByteBuffer, offset) + val is = header.bigEndianEditor(); + val prefix = is.readByte() + if( prefix != LOG_HEADER_PREFIX ) { + // Does not look like a record. 
+ throw new IOException("invalid record position") + } + val id = is.readByte() + val expectedChecksum = is.readInt() + val length = is.readInt() + val data = new Buffer(length) + + if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != length ) { + throw new IOException("short record") + } + + if(verify_checksums) { + if( expectedChecksum != checksum(data) ) { + throw new IOException("checksum does not match") + } + } + (id, data, record_position+LOG_HEADER_SIZE+length) + } + + def check(record_position:Long):Option[(Long, Option[Long])] = { + var offset = record_position-position + val header = new Buffer(LOG_HEADER_SIZE) + channel.read(header.toByteBuffer, offset) + val is = header.bigEndianEditor(); + val prefix = is.readByte() + if( prefix != LOG_HEADER_PREFIX ) { + return None // Does not look like a record. + } + val kind = is.readByte() + val expectedChecksum = is.readInt() + val length = is.readInt() + + val chunk = new Buffer(1024*4) + val chunkbb = chunk.toByteBuffer + offset += LOG_HEADER_SIZE + + // Read the data in in chunks to avoid + // OOME if we are checking an invalid record + // with a bad record length + val checksumer = new CRC32 + var remaining = length + while( remaining > 0 ) { + val chunkSize = remaining.min(1024*4); + chunkbb.position(0) + chunkbb.limit(chunkSize) + channel.read(chunkbb, offset) + if( chunkbb.hasRemaining ) { + return None + } + checksumer.update(chunk.data, 0, chunkSize) + offset += chunkSize + remaining -= chunkSize + } + + val checksum = ( checksumer.getValue & 0xFFFFFFFF).toInt + if( expectedChecksum != checksum ) { + return None + } + val uow_start_pos = if(kind == UOW_END_RECORD && length==8) Some(decode_long(chunk)) else None + return Some(record_position+LOG_HEADER_SIZE+length, uow_start_pos) + } + + def verifyAndGetEndPosition:Long = { + var pos = position; + var current_uow_start = pos + val limit = position+channel.size() + while(pos < limit) { + check(pos) match { + case Some((next, uow_start_pos)) => 
+ uow_start_pos.foreach { uow_start_pos => + if( uow_start_pos == current_uow_start ) { + current_uow_start = next + } else { + return current_uow_start + } + } + pos = next + case None => + return current_uow_start + } + } + return current_uow_start + } + } + + def create_log_appender(position: Long) = { + new LogAppender(next_log(position), position) + } + + def create_appender(position: Long): Any = { + log_mutex.synchronized { + if(current_appender!=null) { + log_infos.put (position, new LogInfo(current_appender.file, current_appender.position, current_appender.append_offset)) + } + current_appender = create_log_appender(position) + log_infos.put(position, new LogInfo(current_appender.file, position, 0)) + } + } + + val max_log_write_latency = TimeMetric() + val max_log_flush_latency = TimeMetric() + val max_log_rotate_latency = TimeMetric() + + def open = { + log_mutex.synchronized { + log_infos.clear() + LevelDBClient.find_sequence_files(directory, logSuffix).foreach { case (position,file) => + log_infos.put(position, LogInfo(file, position, file.length())) + } + + val appendPos = if( log_infos.isEmpty ) { + 0L + } else { + val file = log_infos.lastEntry().getValue + val r = LogReader(file.file, file.position) + try { + val actualLength = r.verifyAndGetEndPosition + val updated = file.copy(length = actualLength - file.position) + log_infos.put(updated.position, updated) + if( updated.file.length != file.length ) { + // we need to truncate. 
+ using(new RandomAccessFile(file.file, "rw")) ( _.setLength(updated.length)) + } + actualLength + } finally { + r.release() + } + } + + create_appender(appendPos) + } + } + + def close = { + log_mutex.synchronized { + current_appender.release + } + } + + def appender_limit = current_appender.append_position + def appender_start = current_appender.position + + def next_log(position:Long) = LevelDBClient.create_sequence_file(directory, position, logSuffix) + + def appender[T](func: (LogAppender)=>T):T= { + val intial_position = current_appender.append_position + try { + max_log_write_latency { + val rc = func(current_appender) + if( current_appender.append_position != intial_position ) { + // Record a UOW_END_RECORD so that on recovery we only replay full units of work. + current_appender.append(UOW_END_RECORD,encode_long(intial_position)) + } + rc + } + } finally { + current_appender.flush + max_log_rotate_latency { + log_mutex.synchronized { + if ( current_appender.append_offset >= logSize ) { + current_appender.release() + on_log_rotate() + create_appender(current_appender.append_position) + } + } + } + } + } + + var on_log_rotate: ()=>Unit = ()=>{} + + private val reader_cache = new LRUCache[File, LogReader](100) { + protected override def onCacheEviction(entry: Entry[File, LogReader]) = { + entry.getValue.release() + } + } + + def log_info(pos:Long) = log_mutex.synchronized { Option(log_infos.floorEntry(pos)).map(_.getValue) } + + private def get_reader[T](record_position:Long)(func: (LogReader)=>T) = { + + val lookup = log_mutex.synchronized { + val info = log_info(record_position) + info.map { info=> + if(info.position == current_appender.position) { + current_appender.retain() + (info, current_appender) + } else { + (info, null) + } + } + } + + lookup.map { case (info, appender) => + val reader = if( appender!=null ) { + // read from the current appender. + appender + } else { + // Checkout a reader from the cache... 
+ reader_cache.synchronized { + var reader = reader_cache.get(info.file) + if(reader==null) { + reader = LogReader(info.file, info.position) + reader_cache.put(info.file, reader) + } + reader.retain() + reader + } + } + + try { + func(reader) + } finally { + reader.release + } + } + } + + def read(pos:Long) = { + get_reader(pos)(_.read(pos)) + } + def read(pos:Long, length:Int) = { + get_reader(pos)(_.read(pos, length)) + } + +} diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala new file mode 100644 index 0000000000..a2b9a9e3b0 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/FileSupport.scala @@ -0,0 +1,296 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.activemq.leveldb.util + +import java.io._ +import org.fusesource.hawtdispatch._ +import org.apache.activemq.leveldb.LevelDBClient +import org.fusesource.leveldbjni.internal.Util +import org.apache.activemq.leveldb.util.ProcessSupport._ + +object FileSupport { + + implicit def toRichFile(file:File):RichFile = new RichFile(file) + + val onWindows = System.getProperty("os.name").toLowerCase().startsWith("windows") + private var linkStrategy = 0 + private val LOG = Log(getClass) + + def link(source:File, target:File):Unit = { + linkStrategy match { + case 0 => + // We first try to link via a native system call. Fails if + // we cannot load the JNI module. + try { + Util.link(source, target) + } catch { + case e:IOException => throw e + case e:Throwable => + // Fallback.. to a slower impl.. + LOG.debug("Native link system call not available") + linkStrategy = 5 + link(source, target) + } + + // TODO: consider implementing a case which does the native system call using JNA + + case 5 => + // Next we try to do the link by executing an + // operating system shell command + try { + if( onWindows ) { + system("fsutil", "hardlink", "create", target.getCanonicalPath, source.getCanonicalPath) match { + case(0, _, _) => // Success + case (_, out, err) => + // TODO: we might want to look at the out/err to see why it failed + // to avoid falling back to the slower strategy. + LOG.debug("fsutil OS command not available either") + linkStrategy = 10 + link(source, target) + } + } else { + system("ln", source.getCanonicalPath, target.getCanonicalPath) match { + case(0, _, _) => // Success + case (_, out, err) => None + // TODO: we might want to look at the out/err to see why it failed + // to avoid falling back to the slower strategy. + LOG.debug("ln OS command not available either") + linkStrategy = 2 + link(source, target) + } + } + } catch { + case e:Throwable => + } + case _ => + // this final strategy is slow but sure to work. 
+ source.copyTo(target) + } + } + + def systemDir(name:String) = { + val baseValue = System.getProperty(name) + if( baseValue==null ) { + sys.error("The the %s system property is not set.".format(name)) + } + val file = new File(baseValue) + if( !file.isDirectory ) { + sys.error("The the %s system property is not set to valid directory path %s".format(name, baseValue)) + } + file + } + + case class RichFile(self:File) { + + def / (path:String) = new File(self, path) + + def linkTo(target:File) = link(self, target) + + def copyTo(target:File) = { + using(new FileOutputStream(target)){ os=> + using(new FileInputStream(self)){ is=> + FileSupport.copy(is, os) + } + } + } + + def listFiles:Array[File] = { + Option(self.listFiles()).getOrElse(Array()) + } + + def recursiveList:List[File] = { + if( self.isDirectory ) { + self :: self.listFiles.toList.flatten( _.recursiveList ) + } else { + self :: Nil + } + } + + def recursiveDelete: Unit = { + if( self.exists ) { + if( self.isDirectory ) { + self.listFiles.foreach(_.recursiveDelete) + } + self.delete + } + } + + def recursiveCopyTo(target: File) : Unit = { + if (self.isDirectory) { + target.mkdirs + self.listFiles.foreach( file=> file.recursiveCopyTo( target / file.getName) ) + } else { + self.copyTo(target) + } + } + + def readText(charset:String="UTF-8"): String = { + using(new FileInputStream(self)) { in => + FileSupport.readText(in, charset) + } + } + + def readBytes: Array[Byte] = { + using(new FileInputStream(self)) { in => + FileSupport.readBytes(in) + } + } + + def writeBytes(data:Array[Byte]):Unit = { + using(new FileOutputStream(self)) { out => + FileSupport.writeBytes(out, data) + } + } + + def writeText(data:String, charset:String="UTF-8"):Unit = { + using(new FileOutputStream(self)) { out => + FileSupport.writeText(out, data, charset) + } + } + + } + + /** + * Returns the number of bytes copied. 
+ */ + def copy(in: InputStream, out: OutputStream): Long = { + var bytesCopied: Long = 0 + val buffer = new Array[Byte](8192) + var bytes = in.read(buffer) + while (bytes >= 0) { + out.write(buffer, 0, bytes) + bytesCopied += bytes + bytes = in.read(buffer) + } + bytesCopied + } + + def using[R,C <: Closeable](closable: C)(proc: C=>R) = { + try { + proc(closable) + } finally { + try { closable.close } catch { case ignore => } + } + } + + def readText(in: InputStream, charset:String="UTF-8"): String = { + new String(readBytes(in), charset) + } + + def readBytes(in: InputStream): Array[Byte] = { + val out = new ByteArrayOutputStream() + copy(in, out) + out.toByteArray + } + + def writeText(out: OutputStream, value: String, charset:String="UTF-8"): Unit = { + writeBytes(out, value.getBytes(charset)) + } + + def writeBytes(out: OutputStream, data: Array[Byte]): Unit = { + copy(new ByteArrayInputStream(data), out) + } + +} + +object ProcessSupport { + import FileSupport._ + + implicit def toRichProcessBuilder(self:ProcessBuilder):RichProcessBuilder = new RichProcessBuilder(self) + + case class RichProcessBuilder(self:ProcessBuilder) { + + def start(out:OutputStream=null, err:OutputStream=null, in:InputStream=null) = { + self.redirectErrorStream(out == err) + val process = self.start + if( in!=null ) { + LevelDBClient.THREAD_POOL { + try { + using(process.getOutputStream) { out => + FileSupport.copy(in, out) + } + } catch { + case _ => + } + } + } else { + process.getOutputStream.close + } + + if( out!=null ) { + LevelDBClient.THREAD_POOL { + try { + using(process.getInputStream) { in => + FileSupport.copy(in, out) + } + } catch { + case _ => + } + } + } else { + process.getInputStream.close + } + + if( err!=null && err!=out ) { + LevelDBClient.THREAD_POOL { + try { + using(process.getErrorStream) { in => + FileSupport.copy(in, err) + } + } catch { + case _ => + } + } + } else { + process.getErrorStream.close + } + process + } + + } + + implicit def 
toRichProcess(self:Process):RichProcess = new RichProcess(self) + + case class RichProcess(self:Process) { + def onExit(func: (Int)=>Unit) = LevelDBClient.THREAD_POOL { + self.waitFor + func(self.exitValue) + } + } + + implicit def toProcessBuilder(args:Seq[String]):ProcessBuilder = new ProcessBuilder().command(args : _*) + + def launch(command:String*)(func: (Int, Array[Byte], Array[Byte])=>Unit ):Unit = launch(command)(func) + def launch(p:ProcessBuilder, in:InputStream=null)(func: (Int, Array[Byte], Array[Byte]) => Unit):Unit = { + val out = new ByteArrayOutputStream + val err = new ByteArrayOutputStream + p.start(out, err, in).onExit { code=> + func(code, out.toByteArray, err.toByteArray) + } + } + + def system(command:String*):(Int, Array[Byte], Array[Byte]) = system(command) + def system(p:ProcessBuilder, in:InputStream=null):(Int, Array[Byte], Array[Byte]) = { + val out = new ByteArrayOutputStream + val err = new ByteArrayOutputStream + val process = p.start(out, err, in) + process.waitFor + (process.exitValue, out.toByteArray, err.toByteArray) + } + +} \ No newline at end of file diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/Log.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/Log.scala new file mode 100644 index 0000000000..7827c67672 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/Log.scala @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb.util + +import java.util.concurrent.atomic.AtomicLong +import org.slf4j.{MDC, Logger, LoggerFactory} +import java.lang.{Throwable, String} + +/** + * @author Hiram Chirino + */ +object Log { + + def apply(clazz:Class[_]):Log = apply(clazz.getName.stripSuffix("$")) + + def apply(name:String):Log = new Log { + override val log = LoggerFactory.getLogger(name) + } + + def apply(value:Logger):Log = new Log { + override val log = value + } + + val exception_id_generator = new AtomicLong(System.currentTimeMillis) + def next_exception_id = exception_id_generator.incrementAndGet.toHexString +} + +/** + * @author Hiram Chirino + */ +trait Log { + import Log._ + val log = LoggerFactory.getLogger(getClass.getName.stripSuffix("$")) + + private def with_throwable(e:Throwable)(func: =>Unit) = { + if( e!=null ) { + val stack_ref = if( log.isDebugEnabled ) { + val id = next_exception_id + MDC.put("stackref", id.toString); + Some(id) + } else { + None + } + func + stack_ref.foreach { id=> + log.debug(e.toString, e) + MDC.remove("stackref") + } + } else { + func + } + } + + private def format(message:String, args:Seq[Any]) = { + if( args.isEmpty ) { + message + } else { + message.format(args.map(_.asInstanceOf[AnyRef]) : _*) + } + } + + def error(m: => String, args:Any*): Unit = { + if( log.isErrorEnabled ) { + log.error(format(m, args.toSeq)) + } + } + + def error(e: Throwable, m: => String, args:Any*): Unit = { + with_throwable(e) { + if( log.isErrorEnabled ) { + log.error(format(m, args.toSeq)) + } + } + } + + def error(e: 
Throwable): Unit = { + with_throwable(e) { + if( log.isErrorEnabled ) { + log.error(e.getMessage) + } + } + } + + def warn(m: => String, args:Any*): Unit = { + if( log.isWarnEnabled ) { + log.warn(format(m, args.toSeq)) + } + } + + def warn(e: Throwable, m: => String, args:Any*): Unit = { + with_throwable(e) { + if( log.isWarnEnabled ) { + log.warn(format(m, args.toSeq)) + } + } + } + + def warn(e: Throwable): Unit = { + with_throwable(e) { + if( log.isWarnEnabled ) { + log.warn(e.toString) + } + } + } + + def info(m: => String, args:Any*): Unit = { + if( log.isInfoEnabled ) { + log.info(format(m, args.toSeq)) + } + } + + def info(e: Throwable, m: => String, args:Any*): Unit = { + with_throwable(e) { + if( log.isInfoEnabled ) { + log.info(format(m, args.toSeq)) + } + } + } + + def info(e: Throwable): Unit = { + with_throwable(e) { + if( log.isInfoEnabled ) { + log.info(e.toString) + } + } + } + + + def debug(m: => String, args:Any*): Unit = { + if( log.isDebugEnabled ) { + log.debug(format(m, args.toSeq)) + } + } + + def debug(e: Throwable, m: => String, args:Any*): Unit = { + if( log.isDebugEnabled ) { + log.debug(format(m, args.toSeq), e) + } + } + + def debug(e: Throwable): Unit = { + if( log.isDebugEnabled ) { + log.debug(e.toString, e) + } + } + + def trace(m: => String, args:Any*): Unit = { + if( log.isTraceEnabled ) { + log.trace(format(m, args.toSeq)) + } + } + + def trace(e: Throwable, m: => String, args:Any*): Unit = { + if( log.isTraceEnabled ) { + log.trace(format(m, args.toSeq), e) + } + } + + def trace(e: Throwable): Unit = { + if( log.isTraceEnabled ) { + log.trace(e.toString, e) + } + } + +} \ No newline at end of file diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala new file mode 100644 index 0000000000..e6a5a6f034 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/LongCounter.scala @@ 
-0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb.util + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class LongCounter(private var value:Long = 0) extends Serializable { + + def clear() = value=0 + def get() = value + def set(value:Long) = this.value = value + + def incrementAndGet() = addAndGet(1) + def decrementAndGet() = addAndGet(-1) + def addAndGet(amount:Long) = { + value+=amount + value + } + + def getAndIncrement() = getAndAdd(1) + def getAndDecrement() = getAndAdd(-1) + def getAndAdd(amount:Long) = { + val rc = value + value+=amount + rc + } + + override def toString() = get().toString +} \ No newline at end of file diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/RetrySupport.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/RetrySupport.scala new file mode 100644 index 0000000000..5d492def61 --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/RetrySupport.scala @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.activemq.leveldb.util + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +object RetrySupport { + + def retry[T](log:Log, isStarted: ()=>Boolean, func: ()=>T): T = { + import log._ + var error:Throwable = null + var rc:Option[T] = None + + // We will loop until the tx succeeds. Perhaps it's + // failing due to a temporary condition like low disk space. + while(!rc.isDefined) { + + try { + rc = Some(func()) + } catch { + case e:Throwable => + e.printStackTrace() + if( error==null ) { + warn(e, "DB operation failed. (entering recovery mode)") + } + error = e + } + + if (!rc.isDefined) { + // We may need to give up if the store is being stopped. + if ( !isStarted() ) { + throw error + } + Thread.sleep(1000) + } + } + + if( error!=null ) { + info("DB recovered from failure.") + } + rc.get + } + +} \ No newline at end of file diff --git a/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala new file mode 100644 index 0000000000..5d4916299a --- /dev/null +++ b/activemq-leveldb/src/main/scala/org/apache/activemq/leveldb/util/TimeMetric.scala @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.activemq.leveldb.util + +case class TimeMetric() { + var max = 0L + + def add(duration:Long) = this.synchronized { + max = max.max(duration) + } + + def get = { + this.synchronized { + max + } / 1000000.0 + } + def reset = { + this.synchronized { + val rc = max + max = 0 + rc + } / 1000000.0 + } + + def apply[T](func: =>T):T = { + val start = System.nanoTime() + try { + func + } finally { + add(System.nanoTime() - start) + } + } + +} + diff --git a/activemq-core/src/main/java/org/apache/activemq/store/leveldb/package.html b/activemq-leveldb/src/main/scala/org/apache/activemq/store/leveldb/package.html similarity index 100% rename from activemq-core/src/main/java/org/apache/activemq/store/leveldb/package.html rename to activemq-leveldb/src/main/scala/org/apache/activemq/store/leveldb/package.html diff --git a/activemq-core/src/test/java/org/apache/activemq/store/leveldb/LevelDBConfigTest.java b/activemq-leveldb/src/test/java/org/apache/activemq/store/leveldb/LevelDBConfigTest.java similarity index 100% rename from activemq-core/src/test/java/org/apache/activemq/store/leveldb/LevelDBConfigTest.java rename to activemq-leveldb/src/test/java/org/apache/activemq/store/leveldb/LevelDBConfigTest.java diff --git a/activemq-core/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java b/activemq-leveldb/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java similarity index 97% rename from activemq-core/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java rename to activemq-leveldb/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java index be6deb99ca..156e9ca781 100644 --- a/activemq-core/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java +++ b/activemq-leveldb/src/test/java/org/apache/activemq/store/leveldb/LevelDBStoreBrokerTest.java @@ -23,7 +23,7 @@ import org.apache.activemq.broker.BrokerService; import 
org.apache.activemq.broker.BrokerTest; import org.apache.activemq.store.kahadb.KahaDBStore; import org.apache.activemq.util.IOHelper; -import org.fusesource.mq.leveldb.LevelDBStore; +import org.apache.activemq.leveldb.LevelDBStore; /** * Once the wire format is completed we can test against real persistence storage. diff --git a/activemq-leveldb/src/test/resources/log4j.properties b/activemq-leveldb/src/test/resources/log4j.properties new file mode 100755 index 0000000000..fd5a31bd24 --- /dev/null +++ b/activemq-leveldb/src/test/resources/log4j.properties @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# The logging properties used during tests.. 
+# +log4j.rootLogger=WARN, console, file +log4j.logger.org.apache.activemq=INFO +log4j.logger.org.fusesource=INFO + +# Console will only display warnnings +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%-5p | %t | %m%n +log4j.appender.console.threshold=TRACE + +# File appender will contain all info messages +log4j.appender.file=org.apache.log4j.FileAppender +log4j.appender.file.layout=org.apache.log4j.PatternLayout +log4j.appender.file.layout.ConversionPattern=%d | %-5p | %m | %c | %t%n +log4j.appender.file.file=target/test.log +log4j.appender.file.append=true diff --git a/activemq-core/src/test/resources/org/apache/activemq/store/leveldb/leveldb.xml b/activemq-leveldb/src/test/resources/org/apache/activemq/store/leveldb/leveldb.xml similarity index 100% rename from activemq-core/src/test/resources/org/apache/activemq/store/leveldb/leveldb.xml rename to activemq-leveldb/src/test/resources/org/apache/activemq/store/leveldb/leveldb.xml diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/ActiveMQScenario.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/ActiveMQScenario.scala new file mode 100644 index 0000000000..e8472ea293 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/ActiveMQScenario.scala @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import org.apache.activemq.spring.ActiveMQConnectionFactory +import javax.jms.{Destination, ConnectionFactory} +import org.apache.activemq.command.{ActiveMQTopic, ActiveMQQueue} + +/** + *

    + * ActiveMQ implementation of the JMS Scenario class. + *

    + * + * @author Hiram Chirino + */ +class ActiveMQScenario extends JMSClientScenario { + + override protected def factory:ConnectionFactory = { + val rc = new ActiveMQConnectionFactory + rc.setBrokerURL(url) + rc + } + + override protected def destination(i:Int):Destination = destination_type match { + case "queue" => new ActiveMQQueue(indexed_destination_name(i)) + case "topic" => new ActiveMQTopic(indexed_destination_name(i)) + case _ => error("Unsuported destination type: "+destination_type) + } + +} diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/EnqueueRateScenariosTest.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/EnqueueRateScenariosTest.scala new file mode 100644 index 0000000000..a311c6556b --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/EnqueueRateScenariosTest.scala @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.activemq.leveldb + +import junit.framework.TestCase +import org.apache.activemq.broker._ +import org.apache.activemq.store._ +import java.io.File +import junit.framework.Assert._ +import org.apache.commons.math.stat.descriptive.DescriptiveStatistics +import region.policy.{PolicyEntry, PolicyMap} + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class EnqueueRateScenariosTest extends TestCase { + + var broker: BrokerService = null + + override def setUp() { + import collection.JavaConversions._ + broker = new BrokerService + broker.setDeleteAllMessagesOnStartup(true) + broker.setPersistenceAdapter(createStore) + broker.addConnector("tcp://0.0.0.0:0") +// val policies = new PolicyMap(); +// val entry = new PolicyEntry +// entry.setQueue(">") +// policies.setPolicyEntries(List(entry)) +// broker.setDestinationPolicy(policies) + broker.start + broker.waitUntilStarted() + } + + override def tearDown() = { + if (broker != null) { + broker.stop + broker.waitUntilStopped + } + } + + protected def canceledEnqueues() = + broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowCanceledCounter + + protected def enqueueOptimized() = + broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueDelayReqested + + protected def enqueueNotOptimized() = + broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueNodelayReqested + + + protected def createStore: PersistenceAdapter = { + var store: LevelDBStore = new LevelDBStore + store.setDirectory(new File("target/activemq-data/leveldb")) + return store + } + + def collect_benchmark(scenario:ActiveMQScenario, warmup:Int, samples_count:Int) = { + val (cancels, optimized, unoptimized) = scenario.with_load { + println("Warming up for %d seconds...".format(warmup)) + Thread.sleep(warmup*1000) + println("Sampling...") + scenario.collection_start + val cancelStart = canceledEnqueues + val enqueueOptimizedStart = enqueueOptimized + val enqueueNotOptimizedStart = enqueueNotOptimized + for (i <- 0 until samples_count) { + Thread.sleep(1000); + scenario.collection_sample + } + (canceledEnqueues-cancelStart, enqueueOptimized-enqueueOptimizedStart, enqueueNotOptimized-enqueueNotOptimizedStart) + } + println("Done.") + + var samples = scenario.collection_end + val error_rates = samples.get("e_custom").get.map(_._2) + 
assertFalse("Errors occured during scenario run: "+error_rates, error_rates.find(_ > 0 ).isDefined ) + + val producer_stats = new DescriptiveStatistics(); + for( producer_rates <- samples.get("p_custom") ) { + for( i <- producer_rates ) { + producer_stats.addValue(i._2) + } + } + + val consumer_stats = new DescriptiveStatistics(); + for( consumer_rates <- samples.get("c_custom") ) { + for( i <- consumer_rates ) { + consumer_stats.addValue(i._2) + } + } + + (producer_stats, consumer_stats, cancels*1.0/samples_count, optimized*1.0/samples_count, unoptimized*1.0/samples_count) + } + + def benchmark(name:String, warmup:Int=3, samples_count:Int=15, async_send:Boolean=true)(setup:(ActiveMQScenario)=>Unit) = { + println("Benchmarking: "+name) + var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend="+async_send + val url = broker.getTransportConnectors.get(0).getConnectUri + options + + val scenario = new ActiveMQScenario + scenario.url = url + scenario.display_errors = true + scenario.persistent = true + scenario.message_size = 1024 * 3 + + setup(scenario) + val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = collect_benchmark(scenario, warmup, samples_count) + + println("%s: producer avg msg/sec: %,.2f, stddev: %,.2f".format(name, producer_stats.getMean, producer_stats.getStandardDeviation)) + println("%s: consumer avg msg/sec: %,.2f, stddev: %,.2f".format(name, consumer_stats.getMean, consumer_stats.getStandardDeviation)) + println("%s: canceled enqueues/sec: %,.2f".format(name,cancels)) + println("%s: optimized enqueues/sec: %,.2f".format(name,optimized)) + println("%s: unoptimized enqueues/sec: %,.2f".format(name,unoptimized)) + + (producer_stats, consumer_stats, cancels, optimized, unoptimized) + } + + def testHighCancelRatio = { + val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = benchmark("both_connected_baseline") { scenario=> + scenario.producers = 1 + scenario.consumers = 1 + } + val cancel_ratio = 
cancels / producer_stats.getMean + assertTrue("Expecting more than 80%% of the enqueues get canceled. But only %.2f%% was canceled".format(cancel_ratio*100), cancel_ratio > .80) + } + + def testDecoupledProducerRate = { + + // Fill up the queue with messages.. for the benefit of the next benchmark.. + val from_1_to_0 = benchmark("from_1_to_0", 60) { scenario=> + scenario.producers = 1 + scenario.consumers = 0 + } + val from_1_to_10 = benchmark("from_1_to_10") { scenario=> + scenario.producers = 1 + scenario.consumers = 10 + } + val from_1_to_1 = benchmark("from_1_to_1") { scenario=> + scenario.producers = 1 + scenario.consumers = 1 + } + + var percent_diff0 = (1.0 - (from_1_to_0._1.getMean / from_1_to_1._1.getMean)).abs * 100 + var percent_diff1 = (1.0 - (from_1_to_1._1.getMean / from_1_to_10._1.getMean)).abs * 100 + + var msg0 = "The 0 vs 1 consumer scenario producer rate was within %.2f%%".format(percent_diff0) + var msg1 = "The 1 vs 10 consumer scenario producer rate was within %.2f%%".format(percent_diff1) + + println(msg0) + println(msg1) + + assertTrue(msg0, percent_diff0 <= 60) + assertTrue(msg1, percent_diff1 <= 20) + } + +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBFastEnqueueTest.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBFastEnqueueTest.scala new file mode 100644 index 0000000000..e2266610f7 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBFastEnqueueTest.scala @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import org.apache.hadoop.fs.FileUtil +import java.io.File +import java.util.concurrent.TimeUnit + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class HALevelDBFastEnqueueTest extends LevelDBFastEnqueueTest { + + override def setUp: Unit = { + TestingHDFSServer.start + super.setUp + } + + override def tearDown: Unit = { + super.tearDown + TestingHDFSServer.stop + } + + override protected def createStore: LevelDBStore = { + var store: HALevelDBStore = new HALevelDBStore + store.setDirectory(dataDirectory) + store.setDfsDirectory("target/activemq-data/hdfs-leveldb") + return store + } + + private def dataDirectory: File = { + return new File("target/activemq-data/leveldb") + } + + /** + * On restart we will also delete the local file system store, so that we test restoring from + * HDFS. + */ + override protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = { + stopBroker + FileUtil.fullyDelete(dataDirectory) + TimeUnit.MILLISECONDS.sleep(restartDelay) + startBroker(false, checkpoint) + } +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBStoreTest.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBStoreTest.scala new file mode 100644 index 0000000000..38206495de --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/HALevelDBStoreTest.scala @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import org.apache.activemq.store.PersistenceAdapter +import java.io.File + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class HALevelDBStoreTest extends LevelDBStoreTest { + override protected def setUp: Unit = { + TestingHDFSServer.start + super.setUp + } + + override protected def tearDown: Unit = { + super.tearDown + TestingHDFSServer.stop + } + + override protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = { + var store: HALevelDBStore = new HALevelDBStore + store.setDirectory(new File("target/activemq-data/haleveldb")) + store.setDfsDirectory("localhost") + if (delete) { + store.deleteAllMessages + } + return store + } +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/IDERunner.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/IDERunner.scala new file mode 100644 index 0000000000..7eeb110528 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/IDERunner.scala @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.activemq.leveldb + +import org.apache.activemq.console.Main + +object IDERunner { + def main(args:Array[String]) ={ + Main.main(args) + } +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/JMSClientScenario.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/JMSClientScenario.scala new file mode 100644 index 0000000000..993d145d91 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/JMSClientScenario.scala @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import java.lang.Thread +import javax.jms._ + +/** + *

    + * Simulates load on a JMS server using the JMS messaging API. + *

    + * + * @author Hiram Chirino + */ +abstract class JMSClientScenario extends Scenario { + + def createProducer(i:Int) = { + new ProducerClient(i) + } + def createConsumer(i:Int) = { + new ConsumerClient(i) + } + + protected def destination(i:Int):Destination + + def indexed_destination_name(i:Int) = destination_type match { + case "queue" => queue_prefix+destination_name+"-"+(i%destination_count) + case "topic" => topic_prefix+destination_name+"-"+(i%destination_count) + case _ => error("Unsuported destination type: "+destination_type) + } + + + protected def factory:ConnectionFactory + + def jms_ack_mode = { + ack_mode match { + case "auto" => Session.AUTO_ACKNOWLEDGE + case "client" => Session.CLIENT_ACKNOWLEDGE + case "dups_ok" => Session.DUPS_OK_ACKNOWLEDGE + case "transacted" => Session.SESSION_TRANSACTED + case _ => throw new Exception("Invalid ack mode: "+ack_mode) + } + } + + trait JMSClient extends Client { + + @volatile + var connection:Connection = _ + var message_counter=0L + + var worker = new Thread() { + override def run() { + var reconnect_delay = 0 + while( !done.get ) { + try { + + if( reconnect_delay!=0 ) { + Thread.sleep(reconnect_delay) + reconnect_delay=0 + } + connection = factory.createConnection(user_name, password) +// connection.setClientID(name) + connection.setExceptionListener(new ExceptionListener { + def onException(exception: JMSException) { + } + }) + connection.start() + + execute + + } catch { + case e:Throwable => + if( !done.get ) { + if( display_errors ) { + e.printStackTrace + } + error_counter.incrementAndGet + reconnect_delay = 1000 + } + } finally { + dispose + } + } + } + } + + def dispose { + try { + connection.close() + } catch { + case _ => + } + } + + def execute:Unit + + def start = { + worker.start + } + + def shutdown = { + assert(done.get) + if ( worker!=null ) { + dispose + worker.join(1000) + while(worker.isAlive ) { + println("Worker did not shutdown quickly.. 
interrupting thread.") + worker.interrupt() + worker.join(1000) + } + worker = null + } + } + + def name:String + } + + class ConsumerClient(val id: Int) extends JMSClient { + val name: String = "consumer " + id + + def execute { + var session = connection.createSession(false, jms_ack_mode) + var consumer:MessageConsumer = if( durable ) { + session.createDurableSubscriber(destination(id).asInstanceOf[Topic], name, selector, no_local) + } else { + session.createConsumer(destination(id), selector, no_local) + } + + while( !done.get() ) { + val msg = consumer.receive(500) + if( msg!=null ) { + consumer_counter.incrementAndGet() + if (consumer_sleep != 0) { + Thread.sleep(consumer_sleep) + } + if(session.getAcknowledgeMode == Session.CLIENT_ACKNOWLEDGE) { + msg.acknowledge(); + } + } + } + } + + } + + class ProducerClient(val id: Int) extends JMSClient { + + val name: String = "producer " + id + + def execute { + val session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE) + val producer:MessageProducer = session.createProducer(destination(id)) + producer.setDeliveryMode(if( persistent ) { + DeliveryMode.PERSISTENT + } else { + DeliveryMode.NON_PERSISTENT + }) + + val msg = session.createTextMessage(body(name)) + headers_for(id).foreach { case (key, value) => + msg.setStringProperty(key, value) + } + + while( !done.get() ) { + producer.send(msg) + producer_counter.incrementAndGet() + if (producer_sleep != 0) { + Thread.sleep(producer_sleep) + } + } + + } + } + + def body(name:String) = { + val buffer = new StringBuffer(message_size) + buffer.append("Message from " + name+"\n") + for( i <- buffer.length to message_size ) { + buffer.append(('a'+(i%26)).toChar) + } + var rc = buffer.toString + if( rc.length > message_size ) { + rc.substring(0, message_size) + } else { + rc + } + } + + + +} diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBFastEnqueueTest.scala 
b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBFastEnqueueTest.scala new file mode 100644 index 0000000000..d680dd3bbf --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBFastEnqueueTest.scala @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.activemq.leveldb + +import org.apache.activemq.ActiveMQConnection +import org.apache.activemq.ActiveMQConnectionFactory +import org.apache.activemq.broker.BrokerService +import org.apache.activemq.command.ActiveMQQueue +import org.apache.activemq.command.ConnectionControl +import org.junit.After +import org.junit.Before +import org.junit.Test +import javax.jms._ +import java.io.File +import java.util.Vector +import java.util.concurrent.ExecutorService +import java.util.concurrent.Executors +import java.util.concurrent.TimeUnit +import java.util.concurrent.atomic.AtomicLong +import junit.framework.Assert._ +import org.apache.activemq.leveldb.util.Log +import junit.framework.TestCase + +object LevelDBFastEnqueueTest extends Log +class LevelDBFastEnqueueTest extends TestCase { + + import LevelDBFastEnqueueTest._ + + @Test def testPublishNoConsumer: Unit = { + startBroker(true, 10) + val sharedCount: AtomicLong = new AtomicLong(toSend) + var start: Long = System.currentTimeMillis + var executorService: ExecutorService = Executors.newCachedThreadPool + var i: Int = 0 + while (i < parallelProducer) { + executorService.execute(new Runnable { + def run: Unit = { + try { + publishMessages(sharedCount, 0) + } + catch { + case e: Exception => { + exceptions.add(e) + } + } + } + }) + i += 1 + } + executorService.shutdown + executorService.awaitTermination(30, TimeUnit.MINUTES) + assertTrue("Producers done in time", executorService.isTerminated) + assertTrue("No exceptions: " + exceptions, exceptions.isEmpty) + var totalSent: Long = toSend * payloadString.length + var duration: Double = System.currentTimeMillis - start + info("Duration: " + duration + "ms") + info("Rate: " + (toSend * 1000 / duration) + "m/s") + info("Total send: " + totalSent) + info("Total journal write: " + store.getLogAppendPosition) + info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%") + stopBroker + restartBroker(0, 1200000) + 
consumeMessages(toSend) + } + + @Test def testPublishNoConsumerNoCheckpoint: Unit = { + toSend = 100 + startBroker(true, 0) + val sharedCount: AtomicLong = new AtomicLong(toSend) + var start: Long = System.currentTimeMillis + var executorService: ExecutorService = Executors.newCachedThreadPool + var i: Int = 0 + while (i < parallelProducer) { + executorService.execute(new Runnable { + def run: Unit = { + try { + publishMessages(sharedCount, 0) + } + catch { + case e: Exception => { + exceptions.add(e) + } + } + } + }) + i += 1; + } + executorService.shutdown + executorService.awaitTermination(30, TimeUnit.MINUTES) + assertTrue("Producers done in time", executorService.isTerminated) + assertTrue("No exceptions: " + exceptions, exceptions.isEmpty) + var totalSent: Long = toSend * payloadString.length + broker.getAdminView.gc + var duration: Double = System.currentTimeMillis - start + info("Duration: " + duration + "ms") + info("Rate: " + (toSend * 1000 / duration) + "m/s") + info("Total send: " + totalSent) + info("Total journal write: " + store.getLogAppendPosition) + info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%") + stopBroker + restartBroker(0, 0) + consumeMessages(toSend) + } + + private def consumeMessages(count: Long): Unit = { + var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection] + connection.setWatchTopicAdvisories(false) + connection.start + var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE) + var consumer: MessageConsumer = session.createConsumer(destination) + var i: Int = 0 + while (i < count) { + assertNotNull("got message " + i, consumer.receive(10000)) + i += 1; + } + assertNull("none left over", consumer.receive(2000)) + } + + protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = { + stopBroker + TimeUnit.MILLISECONDS.sleep(restartDelay) + startBroker(false, checkpoint) + } + + override def 
tearDown() = stopBroker + + def stopBroker: Unit = { + if (broker != null) { + broker.stop + broker.waitUntilStopped + } + } + + private def publishMessages(count: AtomicLong, expiry: Int): Unit = { + var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection] + connection.setWatchTopicAdvisories(false) + connection.start + var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE) + var producer: MessageProducer = session.createProducer(destination) + var start: Long = System.currentTimeMillis + var i: Long = 0l + var bytes: Array[Byte] = payloadString.getBytes + while ((({ + i = count.getAndDecrement; i + })) > 0) { + var message: Message = null + if (useBytesMessage) { + message = session.createBytesMessage + (message.asInstanceOf[BytesMessage]).writeBytes(bytes) + } + else { + message = session.createTextMessage(payloadString) + } + producer.send(message, DeliveryMode.PERSISTENT, 5, expiry) + if (i != toSend && i % sampleRate == 0) { + var now: Long = System.currentTimeMillis + info("Remainder: " + i + ", rate: " + sampleRate * 1000 / (now - start) + "m/s") + start = now + } + } + connection.syncSendPacket(new ConnectionControl) + connection.close + } + + def startBroker(deleteAllMessages: Boolean, checkPointPeriod: Int): Unit = { + broker = new BrokerService + broker.setDeleteAllMessagesOnStartup(deleteAllMessages) + store = createStore + broker.setPersistenceAdapter(store) + broker.addConnector("tcp://0.0.0.0:0") + broker.start + var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend=true&jms.alwaysSessionAsync=false&jms.dispatchAsync=false&socketBufferSize=131072&ioBufferSize=16384&wireFormat.tightEncodingEnabled=false&wireFormat.cacheSize=8192" + connectionFactory = new ActiveMQConnectionFactory(broker.getTransportConnectors.get(0).getConnectUri + options) + } + + protected def createStore: LevelDBStore = { + var store: LevelDBStore = new LevelDBStore + 
store.setDirectory(new File("target/activemq-data/leveldb")) + return store + } + + private[leveldb] var broker: BrokerService = null + private[leveldb] var connectionFactory: ActiveMQConnectionFactory = null + private[leveldb] var store: LevelDBStore = null + private[leveldb] var destination: Destination = new ActiveMQQueue("Test") + private[leveldb] var payloadString: String = new String(new Array[Byte](6 * 1024)) + private[leveldb] var useBytesMessage: Boolean = true + private[leveldb] final val parallelProducer: Int = 20 + private[leveldb] var exceptions: Vector[Exception] = new Vector[Exception] + private[leveldb] var toSend: Long = 100000 + private[leveldb] final val sampleRate: Double = 100000 +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreBrokerTest.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreBrokerTest.scala new file mode 100644 index 0000000000..f9d4432b07 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreBrokerTest.scala @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.activemq.leveldb + +import org.apache.activemq.broker.BrokerService +import org.apache.activemq.broker.BrokerTest +import org.apache.activemq.store.PersistenceAdapter +import java.io.File +import junit.framework.{TestSuite, Test} + +/** + * @author Hiram Chirino + */ +object LevelDBStoreBrokerTest { + def suite: Test = { + return new TestSuite(classOf[LevelDBStoreBrokerTest]) + } + + def main(args: Array[String]): Unit = { + junit.textui.TestRunner.run(suite) + } +} + +class LevelDBStoreBrokerTest extends BrokerTest { + + protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = { + var store: LevelDBStore = new LevelDBStore + store.setDirectory(new File("target/activemq-data/leveldb")) + if (delete) { + store.deleteAllMessages + } + return store + } + + protected override def createBroker: BrokerService = { + var broker: BrokerService = new BrokerService + broker.setPersistenceAdapter(createPersistenceAdapter(true)) + return broker + } + + protected def createRestartedBroker: BrokerService = { + var broker: BrokerService = new BrokerService + broker.setPersistenceAdapter(createPersistenceAdapter(false)) + return broker + } +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreTest.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreTest.scala new file mode 100644 index 0000000000..8d1ac64ca4 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/LevelDBStoreTest.scala @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import org.apache.activemq.store.PersistenceAdapter +import org.apache.activemq.store.PersistenceAdapterTestSupport +import java.io.File + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +class LevelDBStoreTest extends PersistenceAdapterTestSupport { + override def testStoreCanHandleDupMessages: Unit = { + } + + protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = { + var store: LevelDBStore = new LevelDBStore + store.setDirectory(new File("target/activemq-data/haleveldb")) + if (delete) { + store.deleteAllMessages + } + return store + } +} \ No newline at end of file diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/Scenario.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/Scenario.scala new file mode 100644 index 0000000000..a6163b5981 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/Scenario.scala @@ -0,0 +1,331 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.activemq.leveldb + +import java.util.concurrent.atomic._ +import java.util.concurrent.TimeUnit._ +import scala.collection.mutable.ListBuffer + +object Scenario { + val MESSAGE_ID:Array[Byte] = "message-id" + val NEWLINE = '\n'.toByte + val NANOS_PER_SECOND = NANOSECONDS.convert(1, SECONDS) + + implicit def toBytes(value: String):Array[Byte] = value.getBytes("UTF-8") + + def o[T](value:T):Option[T] = value match { + case null => None + case x => Some(x) + } +} + +trait Scenario { + import Scenario._ + + var url:String = "tcp://localhost:61616" + var user_name:String = _ + var password:String = _ + + private var _producer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} } + def producer_sleep = _producer_sleep() + def producer_sleep_= (new_value: Int) = _producer_sleep = new { def apply() = new_value; def init(time: Long) {} } + def producer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _producer_sleep = new_func + + private var _consumer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} } + def consumer_sleep = _consumer_sleep() + def consumer_sleep_= (new_value: Int) = _consumer_sleep = new { def apply() = new_value; def init(time: Long) {} } + def consumer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _consumer_sleep = new_func + + var producers = 1 + var producers_per_sample = 0 + + var consumers = 1 + var consumers_per_sample = 0 + var sample_interval = 1000 + + var message_size = 1024 + var persistent = false + + var headers = Array[Array[(String,String)]]() + var selector:String = null + var no_local = false + var durable = false + var ack_mode = "auto" + var messages_per_connection = -1L + var display_errors = false + + var destination_type = "queue" + private var _destination_name: () => String = () => "load" + def destination_name = _destination_name() + def destination_name_=(new_name: String) = 
_destination_name = () => new_name + def destination_name_=(new_func: () => String) = _destination_name = new_func + var destination_count = 1 + + val producer_counter = new AtomicLong() + val consumer_counter = new AtomicLong() + val error_counter = new AtomicLong() + val done = new AtomicBoolean() + + var queue_prefix = "" + var topic_prefix = "" + var name = "custom" + + var drain_timeout = 2000L + + def run() = { + print(toString) + println("--------------------------------------") + println(" Running: Press ENTER to stop") + println("--------------------------------------") + println("") + + with_load { + + // start a sampling client... + val sample_thread = new Thread() { + override def run() = { + + def print_rate(name: String, periodCount:Long, totalCount:Long, nanos: Long) = { + + val rate_per_second: java.lang.Float = ((1.0f * periodCount / nanos) * NANOS_PER_SECOND) + println("%s total: %,d, rate: %,.3f per second".format(name, totalCount, rate_per_second)) + } + + try { + var start = System.nanoTime + var total_producer_count = 0L + var total_consumer_count = 0L + var total_error_count = 0L + collection_start + while( !done.get ) { + Thread.sleep(sample_interval) + val end = System.nanoTime + collection_sample + val samples = collection_end + samples.get("p_custom").foreach { case (_, count)::Nil => + total_producer_count += count + print_rate("Producer", count, total_producer_count, end - start) + case _ => + } + samples.get("c_custom").foreach { case (_, count)::Nil => + total_consumer_count += count + print_rate("Consumer", count, total_consumer_count, end - start) + case _ => + } + samples.get("e_custom").foreach { case (_, count)::Nil => + if( count!= 0 ) { + total_error_count += count + print_rate("Error", count, total_error_count, end - start) + } + case _ => + } + start = end + } + } catch { + case e:InterruptedException => + } + } + } + sample_thread.start() + + System.in.read() + done.set(true) + + sample_thread.interrupt + sample_thread.join 
+ } + + } + + override def toString() = { + "--------------------------------------\n"+ + "Scenario Settings\n"+ + "--------------------------------------\n"+ + " destination_type = "+destination_type+"\n"+ + " queue_prefix = "+queue_prefix+"\n"+ + " topic_prefix = "+topic_prefix+"\n"+ + " destination_count = "+destination_count+"\n" + + " destination_name = "+destination_name+"\n" + + " sample_interval (ms) = "+sample_interval+"\n" + + " \n"+ + " --- Producer Properties ---\n"+ + " producers = "+producers+"\n"+ + " message_size = "+message_size+"\n"+ + " persistent = "+persistent+"\n"+ + " producer_sleep (ms) = "+producer_sleep+"\n"+ + " headers = "+headers.mkString(", ")+"\n"+ + " \n"+ + " --- Consumer Properties ---\n"+ + " consumers = "+consumers+"\n"+ + " consumer_sleep (ms) = "+consumer_sleep+"\n"+ + " selector = "+selector+"\n"+ + " durable = "+durable+"\n"+ + "" + + } + + protected def headers_for(i:Int) = { + if ( headers.isEmpty ) { + Array[(String, String)]() + } else { + headers(i%headers.size) + } + } + + var producer_samples:Option[ListBuffer[(Long,Long)]] = None + var consumer_samples:Option[ListBuffer[(Long,Long)]] = None + var error_samples = ListBuffer[(Long,Long)]() + + def collection_start: Unit = { + producer_counter.set(0) + consumer_counter.set(0) + error_counter.set(0) + + producer_samples = if (producers > 0 || producers_per_sample>0 ) { + Some(ListBuffer[(Long,Long)]()) + } else { + None + } + consumer_samples = if (consumers > 0 || consumers_per_sample>0 ) { + Some(ListBuffer[(Long,Long)]()) + } else { + None + } + } + + def collection_end: Map[String, scala.List[(Long,Long)]] = { + var rc = Map[String, List[(Long,Long)]]() + producer_samples.foreach{ samples => + rc += "p_"+name -> samples.toList + samples.clear + } + consumer_samples.foreach{ samples => + rc += "c_"+name -> samples.toList + samples.clear + } + rc += "e_"+name -> error_samples.toList + error_samples.clear + rc + } + + trait Client { + def start():Unit + def 
shutdown():Unit + } + + var producer_clients = List[Client]() + var consumer_clients = List[Client]() + + def with_load[T](func: =>T ):T = { + done.set(false) + + _producer_sleep.init(System.currentTimeMillis()) + _consumer_sleep.init(System.currentTimeMillis()) + + for (i <- 0 until producers) { + val client = createProducer(i) + producer_clients ::= client + client.start() + } + + for (i <- 0 until consumers) { + val client = createConsumer(i) + consumer_clients ::= client + client.start() + } + + try { + func + } finally { + done.set(true) + // wait for the threads to finish.. + for( client <- consumer_clients ) { + client.shutdown + } + consumer_clients = List() + for( client <- producer_clients ) { + client.shutdown + } + producer_clients = List() + } + } + + def drain = { + done.set(false) + if( destination_type=="queue" || destination_type=="raw_queue" || durable==true ) { + print("draining") + consumer_counter.set(0) + var consumer_clients = List[Client]() + for (i <- 0 until destination_count) { + val client = createConsumer(i) + consumer_clients ::= client + client.start() + } + + // Keep sleeping until we stop draining messages. + var drained = 0L + try { + Thread.sleep(drain_timeout); + def done() = { + val c = consumer_counter.getAndSet(0) + drained += c + c == 0 + } + while( !done ) { + print(".") + Thread.sleep(drain_timeout); + } + } finally { + done.set(true) + for( client <- consumer_clients ) { + client.shutdown + } + println(". (drained %d)".format(drained)) + } + } + } + + + def collection_sample: Unit = { + + val now = System.currentTimeMillis() + producer_samples.foreach(_.append((now, producer_counter.getAndSet(0)))) + consumer_samples.foreach(_.append((now, consumer_counter.getAndSet(0)))) + error_samples.append((now, error_counter.getAndSet(0))) + + // we might need to increment number the producers.. 
+ for (i <- 0 until producers_per_sample) { + val client = createProducer(producer_clients.length) + producer_clients ::= client + client.start() + } + + // we might need to increment number the consumers.. + for (i <- 0 until consumers_per_sample) { + val client = createConsumer(consumer_clients.length) + consumer_clients ::= client + client.start() + } + + } + + def createProducer(i:Int):Client + def createConsumer(i:Int):Client + +} + + diff --git a/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/TestingHDFSServer.scala b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/TestingHDFSServer.scala new file mode 100644 index 0000000000..8acde21859 --- /dev/null +++ b/activemq-leveldb/src/test/scala/org/apache/activemq/leveldb/TestingHDFSServer.scala @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.activemq.leveldb + +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.fs.FileSystem +import org.apache.hadoop.hdfs.MiniDFSCluster +import java.io.IOException + +/** + *

    + *

    + * + * @author Hiram Chirino + */ +object TestingHDFSServer { + private[leveldb] def start: Unit = { + var conf: Configuration = new Configuration + cluster = new MiniDFSCluster(conf, 1, true, null) + cluster.waitActive + fs = cluster.getFileSystem + } + + private[leveldb] def stop: Unit = { + try { + cluster.shutdown + } + catch { + case e: Throwable => { + e.printStackTrace + } + } + } + + private[leveldb] var cluster: MiniDFSCluster = null + private[leveldb] var fs: FileSystem = null +} \ No newline at end of file diff --git a/pom.xml b/pom.xml index 867398e45f..e67b2c7555 100755 --- a/pom.xml +++ b/pom.xml @@ -64,11 +64,13 @@ 1.3 1.0.6 1.0 + 1.0.0 1.9 1.11 0.1.8 1.8.0.10 4.2.1 + 1.9.2 1.9.0 1.0 7.6.7.v20120910 @@ -93,6 +95,9 @@ 1.0 9.4 9.4.0.1_2 + 2.15.1 + 2.9.1 + 1.8 1.6.6 3.0.7.RELEASE 1.2.1 @@ -198,6 +203,7 @@ activemq-jaas activemq-blueprint activemq-karaf + activemq-leveldb activemq-openwire-generator activemq-optional activemq-pool