This commit is contained in:
Clebert Suconic 2018-06-07 11:26:36 -04:00
commit 22f3f02aea
75 changed files with 7948 additions and 7531 deletions

View File

@ -427,7 +427,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param durable whether the queue is durable or not
* @throws ActiveMQException if an exception occurs while creating the queue
@ -440,7 +440,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Notice: you will get an exception if the address or the filter doesn't match to an already existent queue
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param durable if the queue is durable
* @throws ActiveMQException if an exception occurs while creating the queue
@ -453,7 +453,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Notice: you will get an exception if the address or the filter doesn't match to an already existent queue
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable if the queue is durable
@ -466,7 +466,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a shared queue: a queue that will exist as long as there are consumers or is durable.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable if the queue is durable
@ -483,7 +483,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param durable whether the queue is durable or not
* @throws ActiveMQException if an exception occurs while creating the queue
@ -494,7 +494,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> <em>non-durable</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @throws ActiveMQException if an exception occurs while creating the queue
*/
@ -504,7 +504,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> <em>non-durable</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @throws ActiveMQException if an exception occurs while creating the queue
*/
@ -514,7 +514,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -527,7 +527,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -539,7 +539,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -553,7 +553,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -569,7 +569,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -587,7 +587,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -600,7 +600,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -616,7 +616,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>non-temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param durable whether the queue is durable or not
@ -634,7 +634,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @throws ActiveMQException if an exception occurs while creating the queue
*/
@ -644,7 +644,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>temporary</em> queue.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @throws ActiveMQException if an exception occurs while creating the queue
*/
@ -654,7 +654,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>temporary</em> queue with a filter.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @param maxConsumers how many concurrent consumers will be allowed on this queue
@ -670,7 +670,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>temporary</em> queue with a filter.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @throws ActiveMQException if an exception occurs while creating the queue
@ -681,7 +681,7 @@ public interface ClientSession extends XAResource, AutoCloseable {
* Creates a <em>temporary</em> queue with a filter.
*
* @param address the queue will be bound to this address
* @param routingType the delivery mode for this queue, MULTICAST or ANYCAST
* @param routingType the routing type for this queue, MULTICAST or ANYCAST
* @param queueName the name of the queue
* @param filter only messages which match this filter will be put in the queue
* @throws ActiveMQException if an exception occurs while creating the queue

View File

@ -69,9 +69,8 @@
<xsd:annotation>
<xsd:documentation>
If true then the ActiveMQ Artemis Server will make use of any Protocol Managers that are available
on the
classpath. If false then only the core protocol will be available, unless in Embedded mode where users
can inject their own Protocol Managers.
on the classpath. If false then only the core protocol will be available, unless in Embedded mode
where users can inject their own Protocol Managers.
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -216,7 +215,7 @@
<xsd:element name="log-delegate-factory-class-name" type="xsd:string" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
XXX
DEPRECATED: the name of the factory class to use for log delegation
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -356,7 +355,7 @@
<xsd:element name="remoting-incoming-interceptors" type="class-name-sequenceType" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
a list of &lt;class-name/&gt; elements with the names of classes to use for interceptor incoming
a list of &lt;class-name/&gt; elements with the names of classes to use for intercepting incoming
remoting packets
</xsd:documentation>
</xsd:annotation>
@ -365,7 +364,7 @@
<xsd:element name="remoting-outgoing-interceptors" type="class-name-sequenceType" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
a list of &lt;class-name/&gt; elements with the names of classes to use for interceptor outcoming
a list of &lt;class-name/&gt; elements with the names of classes to use for intercepting outgoing
remoting packets
</xsd:documentation>
</xsd:annotation>
@ -713,10 +712,10 @@
</xsd:annotation>
</xsd:element>
<xsd:element name="journal-file-open-timeout" type="xsd:int" maxOccurs="1" minOccurs="0">
<xsd:element name="journal-file-open-timeout" type="xsd:int" default="5" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
the length of time to wait when opening a new Journal file before timing out and failing
the length of time in seconds to wait when opening a new Journal file before timing out and failing
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -867,7 +866,7 @@
<xsd:attribute name="match" type="xsd:string" use="required">
<xsd:annotation>
<xsd:documentation>
regular expression for matching security roles against addresses
pattern for matching security roles against addresses; can use wildcards
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
@ -1075,7 +1074,7 @@
<xsd:element name="wildcard-addresses" type="wildcardType" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
Wildcard addresses format
parameters to configure wildcard address matching format
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -1103,9 +1102,20 @@
<xsd:element name="broadcast-group">
<xsd:complexType>
<xsd:sequence>
<!-- XXX these 2 local-* here...-->
<xsd:element ref="local-bind-address" maxOccurs="1" minOccurs="0"/>
<xsd:element ref="local-bind-port" maxOccurs="1" minOccurs="0"/>
<xsd:element ref="local-bind-address" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
a local address to which the datagram socket is bound
</xsd:documentation>
</xsd:annotation>
</xsd:element>
<xsd:element ref="local-bind-port" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
a local port to which the datagram socket is bound
</xsd:documentation>
</xsd:annotation>
</xsd:element>
<xsd:element name="group-address" type="xsd:string" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
@ -1163,7 +1173,6 @@
<xsd:element name="discovery-group">
<xsd:complexType>
<xsd:all>
<!-- XXX -->
<xsd:element name="group-address" type="xsd:string" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
@ -1650,8 +1659,7 @@
<xsd:annotation>
<xsd:documentation>
DEPRECATED: use message-load-balancing-type instead. Select STRICT to mimic
forward-when-no-consumers=true
and ON_DEMAND to mimic forward-when-no-consumers=false.
forward-when-no-consumers=true and ON_DEMAND to mimic forward-when-no-consumers=false.
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -1750,7 +1758,7 @@
<xsd:attribute name="discovery-group-name" type="xsd:IDREF" use="required">
<xsd:annotation>
<xsd:documentation>
XXX -- this is a duplicate...
name of discovery group used by this cluster-connection
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
@ -2961,7 +2969,7 @@
</xsd:annotation>
</xsd:element>
<xsd:element name="default-max-consumers" type="xsd:int" default="200" maxOccurs="1" minOccurs="0">
<xsd:element name="default-max-consumers" type="xsd:int" default="-1" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
the maximum number of consumers allowed on this queue at any one time
@ -2990,7 +2998,7 @@
<xsd:attribute name="match" type="xsd:string" use="required">
<xsd:annotation>
<xsd:documentation>
XXX
pattern for matching settings against addresses; can use wildcards
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>
@ -3009,7 +3017,7 @@
<xsd:element name="max-connections" type="xsd:int" default="-1" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
how many connections are allowed by the matched entity (-1 means no limit, default is -1)
how many connections are allowed by the matched user (-1 means no limit, default is -1)
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -3017,7 +3025,7 @@
<xsd:element name="max-queues" type="xsd:int" default="-1" maxOccurs="1" minOccurs="0">
<xsd:annotation>
<xsd:documentation>
how many queues can be created by the matched entity (-1 means no limit, default is -1)
how many queues can be created by the matched user (-1 means no limit, default is -1)
</xsd:documentation>
</xsd:annotation>
</xsd:element>
@ -3146,7 +3154,7 @@
<xsd:attribute name="name" type="xsd:string" use="required">
<xsd:annotation>
<xsd:documentation>
The address name to matches incoming message addresses
The address name to match incoming message addresses
</xsd:documentation>
</xsd:annotation>
</xsd:attribute>

View File

@ -10,14 +10,18 @@
* [Using the Server](using-server.md)
* [Upgrading](upgrading.md)
* [Address Model](address-model.md)
* [Using JMS](using-jms.md)
* [Using Core](using-core.md)
* [Using AMQP](using-AMQP.md)
* [Protocols and Interoperability](protocols-interoperability.md)
* [AMQP](amqp.md)
* [MQTT](mqtt.md)
* [STOMP](stomp.md)
* [OpenWire](openwire.md)
* [Core](core.md)
* [Mapping JMS Concepts to the Core API](jms-core-mapping.md)
* [Using JMS](using-jms.md)
* [The Client Classpath](client-classpath.md)
* [Examples](examples.md)
* [Routing Messages With Wild Cards](wildcard-routing.md)
* [Understanding the Apache ActiveMQ Artemis Wildcard Syntax](wildcard-syntax.md)
* [Wildcard Syntax](wildcard-syntax.md)
* [Filter Expressions](filter-expressions.md)
* [Persistence](persistence.md)
* [Configuring Transports](configuring-transports.md)
@ -56,14 +60,13 @@
* [Thread management](thread-pooling.md)
* [Logging](logging.md)
* [REST Interface](rest.md)
* [Embedding Apache ActiveMQ Artemis](embedding-activemq.md)
* [Embedding the Broker](embedding-activemq.md)
* [Apache Karaf](karaf.md)
* [Apache Tomcat](tomcat.md)
* [Spring Integration](spring-integration.md)
* [CDI Integration](cdi-integration.md)
* [Intercepting Operations](intercepting-operations.md)
* [Protocols and Interoperability](protocols-interoperability.md)
* [Tools](tools.md)
* [Data Tools](data-tools.md)
* [Maven Plugin](maven-plugin.md)
* [Unit Testing](unit-testing.md)
* [Troubleshooting and Performance Tuning](perf-tuning.md)

File diff suppressed because it is too large Load Diff

129
docs/user-manual/en/amqp.md Normal file
View File

@ -0,0 +1,129 @@
# AMQP
Apache ActiveMQ Artemis supports the [AMQP
1.0](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=amqp)
specification. By default there are `acceptor` elements configured to accept
AMQP connections on ports `61616` and `5672`.
See the general [Protocols and Interoperability](protocols-interoperability.md)
chapter for details on configuring an `acceptor` for AMQP.
You can use *any* AMQP 1.0 compatible client.
A short list includes:
- [qpid clients](https://qpid.apache.org/download.html)
- [.NET Clients](https://blogs.apache.org/activemq/entry/using-net-libraries-with-activemq)
- [Javascript NodeJS](https://github.com/noodlefrenzy/node-amqp10)
- [Java Script RHEA](https://github.com/grs/rhea)
- ... and many others.
## Examples
We have a few examples as part of the Artemis distribution:
- .NET:
- ./examples/protocols/amqp/dotnet
- ProtonCPP
- ./examples/protocols/amqp/proton-cpp
- ./examples/protocols/amqp/proton-clustered-cpp
- Ruby
- ./examples/protocols/amqp/proton-ruby
- Java (Using the qpid JMS Client)
- ./examples/protocols/amqp/queue
- Interceptors
- ./examples/features/standard/interceptor-amqp
- ./examples/features/standard/broker-plugin
## Message Conversions
The broker will not perform any message conversion to any other protocols when
sending AMQP and receiving AMQP.
However if you intend your message to be received by an AMQP JMS Client, you
must follow the [JMS Mapping
Conventions](https://www.oasis-open.org/committees/download.php/53086/amqp-bindmap-jms-v1.0-wd05.pdf).
If you send a body type that is not recognized by this specification the
conversion between AMQP and any other protocol will make it a Binary Message.
Make sure you follow these conventions if you intend to cross protocols or
languages. Especially on the message body.
A compatibility setting allows aligning the naming convention of AMQP queues
(JMS Durable and Shared Subscriptions) with CORE. For backwards compatibility
reasons, you need to explicitly enable this via broker configuration:
- `amqp-use-core-subscription-naming`
- `true` - use queue naming convention that is aligned with CORE.
- `false` (default) - use older naming convention.
## Intercepting and changing messages
We don't recommend changing messages at the server's side for a few reasons:
- AMQP messages are meant to be immutable
- The message won't be the original message the user sent
- AMQP has the possibility of signing messages. The signature would be broken.
- For performance reasons. We try not to re-encode (or even decode) messages.
If regardless these recommendations you still need and want to intercept and
change AMQP messages, look at the aforementioned interceptor examples.
## AMQP and security
The Apache ActiveMQ Artemis Server accepts the PLAIN, ANONYMOUS, and GSSAPI
SASL mechanism. These are implemented on the broker's [security](security.md)
infrastructure.
## AMQP and destinations
If an AMQP Link is dynamic then a temporary queue will be created and either
the remote source or remote target address will be set to the name of the
temporary queue. If the Link is not dynamic then the address of the remote
target or source will be used for the queue. If this does not exist then it will
be auto-created if the settings allow.
## AMQP and Multicast Addresses (Topics)
Although AMQP has no notion of "topics" it is still possible to treat AMQP
consumers or receivers as subscriptions rather than just consumers on a queue.
By default any receiving link that attaches to an address that has only
`multicast` enabled will be treated as a subscription and a corresponding
subscription queue will be created. If the Terminus Durability is either
`UNSETTLED_STATE` or `CONFIGURATION` then the queue will be made durable
(similar to a JMS durable subscription) and given a name made up from the
container id and the link name, something like `my-container-id:my-link-name`.
If the Terminus Durability is configured as `NONE` then a volatile `multicast`
queue will be created.
## AMQP and Coordinations - Handling Transactions
An AMQP links target can also be a Coordinator. A Coordinator is used to handle
transactions. If a coordinator is used then the underlying server session will
be transacted and will be either rolled back or committed via the coordinator.
> **Note:**
>
> AMQP allows the use of multiple transactions per session,
> `amqp:multi-txns-per-ssn`, however this version of Apache ActiveMQ Artemis
> will only support single transactions per session.
## AMQP scheduling message delivery
An AMQP message can provide scheduling information that controls the time in
the future when the message will be delivered at the earliest. This
information is provided by adding a message annotation to the sent message.
There are two different message annotations that can be used to schedule a
message for later delivery:
- `x-opt-delivery-time`
The specified value must be a positive long corresponding to the time the
message should be made available for delivery (in milliseconds).
- `x-opt-delivery-delay`
The specified value must be a positive long corresponding to the amount of
milliseconds after the broker receives the given message before it should be
made available for delivery.
If both annotations are present in the same message then the broker will prefer
the more specific `x-opt-delivery-time` value.

View File

@ -1,125 +1,126 @@
# Core Architecture
Apache ActiveMQ Artemis core is designed simply as set of Plain Old Java Objects
(POJOs) - we hope you like its clean-cut design.
Apache ActiveMQ Artemis core is designed simply as a set of Plain Old Java
Objects (POJOs) - we hope you like its clean-cut design.
Each Apache ActiveMQ Artemis server has its own ultra high performance persistent
journal, which it uses for message and other persistence.
Each Apache ActiveMQ Artemis server has its own ultra high performance
persistent journal, which it uses for message and other persistence.
Using a high performance journal allows outrageous persistence message
performance, something not achievable when using a relational database
for persistence.
performance, something not achievable when using a relational database for
persistence (although JDBC is still an option if necessary).
Apache ActiveMQ Artemis clients, potentially on different physical machines interact
with the Apache ActiveMQ Artemis server. Apache ActiveMQ Artemis currently provides two APIs for
messaging at the client side:
Apache ActiveMQ Artemis clients, potentially on different physical machines,
interact with the Apache ActiveMQ Artemis broker. Apache ActiveMQ Artemis
currently ships two API implementations for messaging at the client side:
1. Core client API. This is a simple intuitive Java API that is aligned with the Artemis internal Core. Allowing more
control of broker objects, like for example, direct creation of addresses and queues. The Core API also offers a
full set of messaging functionality without some of the complexities of JMS.
1. Core client API. This is a simple intuitive Java API that is aligned with
the Artemis internal Core. Allowing more control of broker objects (e.g
direct creation of addresses and queues). The Core API also offers a full set
of messaging functionality without some of the complexities of JMS.
2. JMS client API. The standard JMS API is available at the client side.
2. JMS 2.0 client API. The standard JMS API is available at the client side.
Apache ActiveMQ Artemis also provides different protocol implementations on the server so you can use respective clients for these protocols:
1. AMQP
2. OpenWire
3. MQTT
4. STOMP
5. HornetQ (for use with HornetQ clients).
6. CORE (Artemis CORE protocol)
Apache ActiveMQ Artemis also provides different protocol implementations on the
server so you can use respective clients for these protocols:
- AMQP
- OpenWire
- MQTT
- STOMP
- HornetQ (for use with HornetQ clients).
- Core (Artemis CORE protocol)
JMS semantics are implemented by a JMS facade layer on the client side.
The Apache ActiveMQ Artemis server does not speak JMS and in fact does not know
anything about JMS, it is a protocol agnostic messaging server designed
to be used with multiple different protocols.
The Apache ActiveMQ Artemis broker does not speak JMS and in fact does not know
anything about JMS, it is a protocol agnostic messaging server designed to be
used with multiple different protocols.
When a user uses the JMS API on the client side, all JMS interactions
are translated into operations on the Apache ActiveMQ Artemis core client API before
being transferred over the wire using the Apache ActiveMQ Artemis wire format.
When a user uses the JMS API on the client side, all JMS interactions are
translated into operations on the Apache ActiveMQ Artemis core client API
before being transferred over the wire using the core protocol.
The server always just deals with core API interactions.
The broker always just deals with core API interactions.
A schematic illustrating this relationship is shown in figure 3.1 below:
![ActiveMQ Artemis architecture1](images/architecture1.jpg)
Figure 3.1 shows two user applications interacting with an Apache ActiveMQ Artemis
server. User Application 1 is using the JMS API, while User Application
Figure 3.1 shows two user applications interacting with an Apache ActiveMQ
Artemis server. User Application 1 is using the JMS API, while User Application
2 is using the core client API directly.
You can see from the diagram that the JMS API is implemented by a thin
facade layer on the client side.
You can see from the diagram that the JMS API is implemented by a thin facade
layer on the client side.
## Stand-alone Broker
The normal stand-alone messaging broker configuration comprises a core
messaging broker and a number of protocol managers that provide support for
the various protocol mentioned earlier. Protocol managers are pluggable
if you
messaging broker and a number of protocol managers that provide support for the
various protocols mentioned earlier.
The stand-alone broker configuration uses [Airline](https://github.com/airlift/airline)
for bootstrapping the Broker.
The stand-alone broker configuration uses
[Airline](https://github.com/airlift/airline) for bootstrapping the Broker.
The stand-alone broker architecture is shown in figure 3.3 below:
![ActiveMQ Artemis architecture3](images/architecture3.jpg)
For more information on server configuration files see [Server Configuration](configuration-index.md)
For more information on server configuration files see [Server
Configuration](configuration-index.md)
## Embedded Broker
Apache ActiveMQ Artemis core is designed as a set of simple POJOs so if you have an
application that requires messaging functionality internally but you
Apache ActiveMQ Artemis core is designed as a set of simple POJOs so if you
have an application that requires messaging functionality internally but you
don't want to expose that as an Apache ActiveMQ Artemis broker you can directly
instantiate and embed Apache ActiveMQ Artemis brokers in your own application.
instantiate and embed brokers in your own application.
For more information on embedding Apache ActiveMQ Artemis, see [Embedding Apache ActiveMQ Artemis](embedding-activemq.md).
Read more about [embedding Apache ActiveMQ Artemis](embedding-activemq.md).
## Integrated with a Java EE application server
Apache ActiveMQ Artemis provides its own fully functional Java Connector Architecture
(JCA) adaptor which enables it to be integrated easily into any Java EE
compliant application server or servlet engine.
Apache ActiveMQ Artemis provides its own fully functional Java Connector
Architecture (JCA) adaptor which enables it to be integrated easily into any
Java EE compliant application server or servlet engine.
Java EE application servers provide Message Driven Beans (MDBs), which are a
special type of Enterprise Java Beans (EJBs) that can process messages
from sources such as JMS systems or mail systems.
special type of Enterprise Java Beans (EJBs) that can process messages from
sources such as JMS systems or mail systems.
Probably the most common use of an MDB is to consume messages from a JMS
messaging system.
According to the Java EE specification, a Java EE application server uses a JCA
adapter to integrate with a JMS messaging system so it can consume
messages for MDBs.
adapter to integrate with a JMS messaging system so it can consume messages for
MDBs.
However, the JCA adapter is not only used by the Java EE application server
for *consuming* messages via MDBs, it is also used when sending message
to the JMS messaging system e.g. from inside an EJB or servlet.
However, the JCA adapter is not only used by the Java EE application server for
*consuming* messages via MDBs, it is also used when sending message to the JMS
messaging system e.g. from inside an EJB or servlet.
When integrating with a JMS messaging system from inside a Java EE
application server it is always recommended that this is done via a JCA
adaptor. In fact, communicating with a JMS messaging system directly,
without using JCA would be illegal according to the Java EE specification.
When integrating with a JMS messaging system from inside a Java EE application
server it is always recommended that this is done via a JCA adaptor. In fact,
communicating with a JMS messaging system directly, without using JCA would be
illegal according to the Java EE specification.
The application server's JCA service provides extra functionality such
as connection pooling and automatic transaction enlistment, which are
desirable when using messaging, say, from inside an EJB. It is possible
to talk to a JMS messaging system directly from an EJB, MDB or servlet
without going through a JCA adapter, but this is not recommended since
you will not be able to take advantage of the JCA features, such as
caching of JMS sessions, which can result in poor performance.
The application server's JCA service provides extra functionality such as
connection pooling and automatic transaction enlistment, which are desirable
when using messaging, say, from inside an EJB. It is possible to talk to a JMS
messaging system directly from an EJB, MDB or servlet without going through a
JCA adapter, but this is not recommended since you will not be able to take
advantage of the JCA features, such as caching of JMS sessions, which can
result in poor performance.
Figure 3.2 below shows a Java EE application server integrating with a
Apache ActiveMQ Artemis server via the Apache ActiveMQ Artemis JCA adaptor. Note that all
communication between EJB sessions or entity beans and Message Driven
beans go through the adaptor and not directly to Apache ActiveMQ Artemis.
Figure 3.2 below shows a Java EE application server integrating with an Apache
ActiveMQ Artemis server via the Apache ActiveMQ Artemis JCA adaptor. Note that
all communication between EJB sessions or entity beans and Message Driven beans
go through the adaptor and not directly to Apache ActiveMQ Artemis.
The large arrow with the prohibited sign shows an EJB session bean
talking directly to the Apache ActiveMQ Artemis server. This is not recommended as
you'll most likely end up creating a new connection and session every
time you want to interact from the EJB, which is an anti-pattern.
The large arrow with the prohibited sign shows an EJB session bean talking
directly to the Apache ActiveMQ Artemis server. This is not recommended as
you'll most likely end up creating a new connection and session every time you
want to interact from the EJB, which is an anti-pattern.
![ActiveMQ Artemis architecture2](images/architecture2.jpg)

View File

@ -1,50 +1,46 @@
# Apache ActiveMQ Artemis Plugin Support
Apache ActiveMQ Artemis is designed to allow extra functionality to be added by
creating a plugin. Multiple plugins can be registered at the same time and they will be chained
together and executed in the order they are registered. (i.e. the first plugin registered
is always executed first).
creating a plugin. Multiple plugins can be registered at the same time and they
will be chained together and executed in the order they are registered (i.e.
the first plugin registered is always executed first).
Creating a plugin is very simple. It requires implementing the [`ActiveMQServerPlugin`](https://github.com/apache/activemq-artemis/blob/master/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/plugin/ActiveMQServerPlugin.java)
interface, making sure the plugin is on the classpath, and registering it with the broker. Only the methods that you want to add behavior for need to be implemented as all of the interface methods are default methods.
Creating a plugin is very simple. It requires:
## Adding the plugin to the classpath
- Implementing the [`ActiveMQServerPlugin`](https://github.com/apache/activemq-artemis/blob/master/artemis-server/src/main/java/org/apache/activemq/artemis/core/server/plugin/ActiveMQServerPlugin.java)
interface
- Making sure the plugin is [on the classpath](using-server.md#adding-runtime-dependencies)
- Registering it with the broker either via [xml](#registering-a-plugin) or [programmatically](#registering-a-plugin-programmatically).
See the documentation on [adding runtime dependencies](using-server.md) to understand how to make your plugin available to the broker.
If you are using an embedded system then you will need the jar under the regular classpath of your embedded application.
Only the methods that you want to add behavior for need to be implemented as
all of the interface methods are default methods.
## Registering a Plugin
To register a plugin via XML you need to add the `broker-plugins` element to `broker.xml`. It is also possible
to pass configuration to a plugin using the `property` child element(s). These properties (zero to many)
will be read and passed into the Plugin's `init(Map<String, String>)` operation after the plugin
has been instantiated.
To register a plugin via XML you need to add the `broker-plugins` element
to `broker.xml`. It is also possible to pass configuration to a plugin
using the `property` child element(s). These properties (zero to many) will be
read and passed into the plugin's `init(Map<String, String>)` operation after
the plugin has been instantiated.
```xml
<configuration ...>
...
<broker-plugins>
<broker-plugin class-name="some.plugin.UserPlugin">
<property key="property1" value="val_1" />
<property key="property2" value="val_2" />
</broker-plugin>
</broker-plugins>
...
</configuration>
<broker-plugins>
<broker-plugin class-name="some.plugin.UserPlugin">
<property key="property1" value="val_1" />
<property key="property2" value="val_2" />
</broker-plugin>
</broker-plugins>
```
## Registering a Plugin Programmatically
For registering a plugin programmatically you need to call the
registerBrokerPlugin() method and pass in a new instance of your plugin. In the example below
assuming your plugin is called `UserPlugin`, registering it looks like the following:
`registerBrokerPlugin()` method and pass in a new instance of your plugin. In
the example below assuming your plugin is called `UserPlugin`, registering it
looks like the following:
``` java
...
Configuration config = new ConfigurationImpl();
@ -53,144 +49,80 @@ Configuration config = new ConfigurationImpl();
config.registerBrokerPlugin(new UserPlugin());
```
## Using the LoggingActiveMQServerPlugin
## Using the `LoggingActiveMQServerPlugin`
The LoggingActiveMQServerPlugin logs specific broker events.
The `LoggingActiveMQServerPlugin` logs specific broker events.
You can select which events are logged by setting the following configuration properties to `true`.
You can select which events are logged by setting the following configuration
properties to `true`.
<table summary="LoggingActiveMQServerPlugin configuration" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Property</th>
<th>Property Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>LOG_CONNECTION_EVENTS</td>
<td>Log info when a Connection is created/destroyed. Default `false`.</td>
</tr>
<tr>
<td>LOG_SESSION_EVENTS</td>
<td>Log info when a Session is created/closed. Default `false`.</td>
</tr>
<tr>
<td>LOG_CONSUMER_EVENTS</td>
<td>Logs info when a Consumer is created/closed. Default `false`.</td>
</tr>
<tr>
<td>LOG_DELIVERING_EVENTS</td>
<td>Logs info when message is delivered to a consumer and when a message is acknowledged by a consumer.
Default `false`</td>
</tr>
<tr>
<td>LOG_SENDING_EVENTS</td>
<td>Logs info when a message has been sent to an address and when a message has been routed within the broker.
Default `false`</td>
</tr>
<tr>
<td>LOG_INTERNAL_EVENTS</td>
<td>Logs info when a queue created/destroyed, when a message is expired, when a bridge is deployed and when a critical
failure occurs. Default `false`</td>
</tr>
<tr>
<td>LOG_ALL_EVENTS</td>
<td>Logs info for all the above events. Default `false`</td>
</tr>
</tbody>
</table>
Property|Trigger Event|Default Value
---|---|---
`LOG_CONNECTION_EVENTS`|Connection is created/destroyed.|`false`
`LOG_SESSION_EVENTS`|Session is created/closed.|`false`
`LOG_CONSUMER_EVENTS`|Consumer is created/closed|`false`
`LOG_DELIVERING_EVENTS`|Message is delivered to a consumer and when a message is acknowledged by a consumer.|`false`
`LOG_SENDING_EVENTS`|When a message has been sent to an address and when a message has been routed within the broker.|`false`
`LOG_INTERNAL_EVENTS`|When a queue created/destroyed, when a message is expired, when a bridge is deployed and when a critical failure occurs.|`false`
`LOG_ALL_EVENTS`|Includes all the above events.|`false`
By default the LoggingActiveMQServerPlugin will not log any information. The logging is activated by setting one (or a selection)
of the above configuration properties to `true`.
By default the `LoggingActiveMQServerPlugin` will not log any information. The
logging is activated by setting one (or a selection) of the above configuration
properties to `true`.
To configure the plugin, you can add the following configuration to the broker. In the example below both LOG_DELIVERING_EVENTS
and LOG_SENDING_EVENTS will be logged by the broker.
To configure the plugin, you can add the following configuration to the broker.
In the example below both `LOG_DELIVERING_EVENTS` and `LOG_SENDING_EVENTS` will
be logged by the broker.
```xml
<configuration ...>
...
<broker-plugins>
<broker-plugin class-name="org.apache.activemq.artemis.core.server.plugin.impl.LoggingActiveMQServerPlugin">
<property key="LOG_DELIVERING_EVENTS" value="true" />
<property key="LOG_SENDING_EVENTS" value="true" />
</broker-plugin>
</broker-plugins>
...
</configuration>
<broker-plugins>
<broker-plugin class-name="org.apache.activemq.artemis.core.server.plugin.impl.LoggingActiveMQServerPlugin">
<property key="LOG_DELIVERING_EVENTS" value="true" />
<property key="LOG_SENDING_EVENTS" value="true" />
</broker-plugin>
</broker-plugins>
```
Most events in the LoggingActiveMQServerPlugin follow a `beforeX` and `afterX` notification pattern e.g beforeCreateConsumer() and afterCreateConsumer().
Most events in the `LoggingActiveMQServerPlugin` follow a `beforeX` and
`afterX` notification pattern (e.g `beforeCreateConsumer()` and
`afterCreateConsumer()`).
At Log Level `INFO`, the LoggingActiveMQServerPlugin logs an entry when an `afterX` notification occurs. By setting the Logger
"org.apache.activemq.artemis.core.server.plugin.impl" to `DEBUG` Level, log entries are generated for both `beforeX` and `afterX` notifications.
Log Level `DEBUG` will also log more information for a notification when available.
At Log Level `INFO`, the LoggingActiveMQServerPlugin logs an entry when an
`afterX` notification occurs. By setting the logger
`org.apache.activemq.artemis.core.server.plugin.impl` to `DEBUG`, log entries
are generated for both `beforeX` and `afterX` notifications. Log level `DEBUG`
will also log more information for a notification when available.
## Using the NotificationActiveMQServerPlugin
The NotificationActiveMQServerPlugin can be configured to send extra notifications for specific broker events.
The NotificationActiveMQServerPlugin can be configured to send extra
notifications for specific broker events.
You can select which notifications are sent by setting the following configuration properties to `true`.
You can select which notifications are sent by setting the following
configuration properties to `true`.
<table summary="NotificationActiveMQServerPlugin configuration" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Property</th>
<th>Property Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>SEND_CONNECTION_NOTIFICATIONS</td>
<td>Sends a notification when a Connection is created/destroyed. Default `false`.</td>
</tr>
<tr>
<td>SEND_SESSION_NOTIFICATIONS</td>
<td>Sends a notification when a Session is created/closed. Default `false`.</td>
</tr>
<tr>
<td>SEND_ADDRESS_NOTIFICATIONS</td>
<td>Sends a notification when an Address is added/removed. Default `false`.</td>
</tr>
<tr>
<td>SEND_DELIVERED_NOTIFICATIONS</td>
<td>Sends a notification when message is delivered to a consumer. Default `false`</td>
</tr>
<tr>
<td>SEND_EXPIRED_NOTIFICATIONS</td>
<td>Sends a notification when message has been expired by the broker. Default `false`</td>
</tr>
</tbody>
</table>
Property|Property Description|Default Value
---|---|---
`SEND_CONNECTION_NOTIFICATIONS`|Sends a notification when a Connection is created/destroyed.|`false`.
`SEND_SESSION_NOTIFICATIONS`|Sends a notification when a Session is created/closed.|`false`.
`SEND_ADDRESS_NOTIFICATIONS`|Sends a notification when an Address is added/removed.|`false`.
`SEND_DELIVERED_NOTIFICATIONS`|Sends a notification when message is delivered to a consumer.|`false`
`SEND_EXPIRED_NOTIFICATIONS`|Sends a notification when message has been expired by the broker.|`false`
By default the NotificationActiveMQServerPlugin will not send any notifications. The plugin is activated by setting one (or a selection)
of the above configuration properties to `true`.
By default the NotificationActiveMQServerPlugin will not send any
notifications. The plugin is activated by setting one (or a selection) of the
above configuration properties to `true`.
To configure the plugin, you can add the following configuration to the broker. In the example below both SEND_CONNECTION_NOTIFICATIONS
and SEND_SESSION_NOTIFICATIONS will be sent by the broker.
To configure the plugin, you can add the following configuration to the broker.
In the example below both `SEND_CONNECTION_NOTIFICATIONS` and
`SEND_SESSION_NOTIFICATIONS` will be sent by the broker.
```xml
<configuration ...>
...
<broker-plugins>
<broker-plugin class-name="org.apache.activemq.artemis.core.server.plugin.impl.NotificationActiveMQServerPlugin">
<property key="SEND_CONNECTION_NOTIFICATIONS" value="true" />
<property key="SEND_SESSION_NOTIFICATIONS" value="true" />
</broker-plugin>
</broker-plugins>
...
</configuration>
<broker-plugins>
<broker-plugin class-name="org.apache.activemq.artemis.core.server.plugin.impl.NotificationActiveMQServerPlugin">
<property key="SEND_CONNECTION_NOTIFICATIONS" value="true" />
<property key="SEND_SESSION_NOTIFICATIONS" value="true" />
</broker-plugin>
</broker-plugins>
```

View File

@ -4,12 +4,13 @@ Apache ActiveMQ Artemis requires just a single jar on the *client classpath*.
> **Warning**
>
> The client jar mentioned here can be found in the `lib/client` directory of the
> Apache ActiveMQ Artemis distribution. Be sure you only use the jar from the correct
> version of the release, you *must not* mix and match versions of jars
> from different Apache ActiveMQ Artemis versions. Mixing and matching different jar
> versions may cause subtle errors and failures to occur.
> The client jar mentioned here can be found in the `lib/client` directory of
> the Apache ActiveMQ Artemis distribution. Be sure you only use the jar from
> the correct version of the release, you *must not* mix and match versions of
> jars from different Apache ActiveMQ Artemis versions. Mixing and matching
> different jar versions may cause subtle errors and failures to occur.
Whether you are using JMS or just the Core API simply add the `artemis-jms-client-all.jar`
from the `lib/client` directory to your client classpath. This is a "shaded" jar that
contains all the Artemis code plus dependencies (e.g. JMS spec, Netty, etc.).
Whether you are using JMS or just the Core API simply add the
`artemis-jms-client-all.jar` from the `lib/client` directory to your client
classpath. This is a "shaded" jar that contains all the Artemis code plus
dependencies (e.g. JMS spec, Netty, etc.).

View File

@ -6,111 +6,102 @@ connection between the client and the server.
## 100% Transparent session re-attachment
If the failure was due to some transient failure such as a temporary
network failure, and the target server was not restarted, then the
sessions will still be existent on the server, assuming the client
hasn't been disconnected for more than connection-ttl [Detecting Dead Connections](connection-ttl.md)
If the failure was due to some transient failure such as a temporary network
failure, and the target server was not restarted, then the sessions will still
be existent on the server, assuming the client hasn't been disconnected for
more than [connection-ttl](connection-ttl.md).
In this scenario, Apache ActiveMQ Artemis will automatically re-attach the client
sessions to the server sessions when the connection reconnects. This is
done 100% transparently and the client can continue exactly as if
nothing had happened.
In this scenario, Apache ActiveMQ Artemis will automatically re-attach the
client sessions to the server sessions when the connection reconnects. This is
done 100% transparently and the client can continue exactly as if nothing had
happened.
The way this works is as follows:
As Apache ActiveMQ Artemis clients send commands to their servers they store each sent
command in an in-memory buffer. In the case that connection failure
occurs and the client subsequently reattaches to the same server, as
part of the reattachment protocol the server informs the client during
reattachment with the id of the last command it successfully received
from that client.
As Apache ActiveMQ Artemis clients send commands to their servers they store
each sent command in an in-memory buffer. In the case that connection failure
occurs and the client subsequently reattaches to the same server, as part of
the reattachment protocol the server informs the client during reattachment
with the id of the last command it successfully received from that client.
If the client has sent more commands than were received before failover
it can replay any sent commands from its buffer so that the client and
server can reconcile their states.
If the client has sent more commands than were received before failover it can
replay any sent commands from its buffer so that the client and server can
reconcile their states.
The size of this buffer is configured with the `confirmationWindowSize`
parameter on the connection URL. When the server has received
`confirmationWindowSize` bytes of commands and processed them it will
send back a command confirmation to the client, and the client can then
free up space in the buffer.
`confirmationWindowSize` bytes of commands and processed them it will send back
a command confirmation to the client, and the client can then free up space in
the buffer.
The window is specified in bytes.
Setting this parameter to `-1` disables any buffering and prevents any
re-attachment from occurring, forcing reconnect instead. The default
value for this parameter is `-1`. (Which means by default no auto
re-attachment will occur)
re-attachment from occurring, forcing reconnect instead. The default value for
this parameter is `-1`. (Which means by default no auto re-attachment will
occur)
## Session reconnection
Alternatively, the server might have actually been restarted after
crashing or being stopped. In this case any sessions will no longer be
existent on the server and it won't be possible to 100% transparently
re-attach to them.
Alternatively, the server might have actually been restarted after crashing or
being stopped. In this case any sessions will no longer be existent on the
server and it won't be possible to 100% transparently re-attach to them.
In this case, Apache ActiveMQ Artemis will automatically reconnect the connection and
*recreate* any sessions and consumers on the server corresponding to the
sessions and consumers on the client. This process is exactly the same
as what happens during failover onto a backup server.
In this case, Apache ActiveMQ Artemis will automatically reconnect the
connection and *recreate* any sessions and consumers on the server
corresponding to the sessions and consumers on the client. This process is
exactly the same as what happens during failover onto a backup server.
Client reconnection is also used internally by components such as core
bridges to allow them to reconnect to their target servers.
Client reconnection is also used internally by components such as core bridges
to allow them to reconnect to their target servers.
Please see the section on failover [Automatic Client Failover](ha.md) to get a full understanding of how
transacted and non-transacted sessions are reconnected during
failover/reconnect and what you need to do to maintain *once and only
once* delivery guarantees.
Please see the section on failover [Automatic Client Failover](ha.md) to get a
full understanding of how transacted and non-transacted sessions are
reconnected during failover/reconnect and what you need to do to maintain *once
and only once* delivery guarantees.
## Configuring reconnection/reattachment attributes
Client reconnection is configured using the following parameters:
- `retryInterval`. This optional parameter determines the period in
milliseconds between subsequent reconnection attempts, if the
connection to the target server has failed. The default value is
`2000` milliseconds.
- `retryInterval`. This optional parameter determines the period in
milliseconds between subsequent reconnection attempts, if the connection to
the target server has failed. The default value is `2000` milliseconds.
- `retryIntervalMultiplier`. This optional parameter determines a
  multiplier to apply to the time since the last retry to
  compute the time to the next retry.
- `retryIntervalMultiplier`. This optional parameter determines a
  multiplier to apply to the time since the last retry to compute the time to
  the next retry.
This allows you to implement an *exponential backoff* between retry
attempts.
This allows you to implement an *exponential backoff* between retry attempts.
Let's take an example:
Let's take an example:
If we set `retryInterval` to `1000` ms and we set
`retryIntervalMultiplier` to `2.0`, then, if the first reconnect
attempt fails, we will wait `1000` ms then `2000` ms then `4000` ms
between subsequent reconnection attempts.
If we set `retryInterval` to `1000` ms and we set `retryIntervalMultiplier`
to `2.0`, then, if the first reconnect attempt fails, we will wait `1000` ms
then `2000` ms then `4000` ms between subsequent reconnection attempts.
The default value is `1.0` meaning each reconnect attempt is spaced
at equal intervals.
The default value is `1.0` meaning each reconnect attempt is spaced at equal
intervals.
- `maxRetryInterval`. This optional parameter determines the maximum
retry interval that will be used. When setting
`retryIntervalMultiplier` it would otherwise be possible that
subsequent retries exponentially increase to ridiculously large
values. By setting this parameter you can set an upper limit on that
value. The default value is `2000` milliseconds.
- `maxRetryInterval`. This optional parameter determines the maximum retry
interval that will be used. When setting `retryIntervalMultiplier` it would
otherwise be possible that subsequent retries exponentially increase to
ridiculously large values. By setting this parameter you can set an upper limit
on that value. The default value is `2000` milliseconds.
- `reconnectAttempts`. This optional parameter determines the total
number of reconnect attempts to make before giving up and shutting
down. A value of `-1` signifies an unlimited number of attempts. The
default value is `0`.
- `reconnectAttempts`. This optional parameter determines the total number of
reconnect attempts to make before giving up and shutting down. A value of
`-1` signifies an unlimited number of attempts. The default value is `0`.
All of these parameters are set on the URL used to connect to the broker.
If your client does manage to reconnect but the session is no longer
available on the server, for instance if the server has been restarted
or it has timed out, then the client won't be able to re-attach, and any
`ExceptionListener` or `FailureListener` instances registered on the
connection or session will be called.
If your client does manage to reconnect but the session is no longer available
on the server, for instance if the server has been restarted or it has timed
out, then the client won't be able to re-attach, and any `ExceptionListener` or
`FailureListener` instances registered on the connection or session will be
called.
ExceptionListeners and SessionFailureListeners
==============================================
## ExceptionListeners and SessionFailureListeners
Please note, that when a client reconnects or re-attaches, any
registered JMS `ExceptionListener` or core API `SessionFailureListener`
will be called.
Please note, that when a client reconnects or re-attaches, any registered JMS
`ExceptionListener` or core API `SessionFailureListener` will be called.

View File

@ -46,13 +46,13 @@ connect to them with the minimum of configuration.
Server discovery is a mechanism by which servers can propagate their
connection details to:
- Messaging clients. A messaging client wants to be able to connect to
the servers of the cluster without having specific knowledge of
which servers in the cluster are up at any one time.
- Messaging clients. A messaging client wants to be able to connect to
the servers of the cluster without having specific knowledge of
which servers in the cluster are up at any one time.
- Other servers. Servers in a cluster want to be able to create
cluster connections to each other without having prior knowledge of
all the other servers in the cluster.
- Other servers. Servers in a cluster want to be able to create
cluster connections to each other without having prior knowledge of
all the other servers in the cluster.
This information, let's call it the Cluster Topology, is actually sent
around normal Apache ActiveMQ Artemis connections to clients and to other servers over
@ -94,12 +94,12 @@ Let's take a look at an example broadcast group from
```xml
<broadcast-groups>
<broadcast-group name="my-broadcast-group">
<local-bind-address>172.16.9.3</local-bind-address>
<local-bind-port>5432</local-bind-port>
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<broadcast-period>2000</broadcast-period>
<connector-ref>netty-connector</connector-ref>
<local-bind-address>172.16.9.3</local-bind-address>
<local-bind-port>5432</local-bind-port>
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<broadcast-period>2000</broadcast-period>
<connector-ref>netty-connector</connector-ref>
</broadcast-group>
</broadcast-groups>
```
@ -108,64 +108,66 @@ Some of the broadcast group parameters are optional and you'll normally
use the defaults, but we specify them all in the above example for
clarity. Let's discuss each one in turn:
- `name` attribute. Each broadcast group in the server must have a
unique name.
- `name` attribute. Each broadcast group in the server must have a
unique name.
- `local-bind-address`. This is the local bind address that the
datagram socket is bound to. If you have multiple network interfaces
on your server, you would specify which one you wish to use for
broadcasts by setting this property. If this property is not
specified then the socket will be bound to the wildcard address, an
IP address chosen by the kernel. This is a UDP specific attribute.
- `local-bind-address`. This is the local bind address that the
datagram socket is bound to. If you have multiple network interfaces
on your server, you would specify which one you wish to use for
broadcasts by setting this property. If this property is not
specified then the socket will be bound to the wildcard address, an
IP address chosen by the kernel. This is a UDP specific attribute.
- `local-bind-port`. If you want to specify a local port to which the
datagram socket is bound you can specify it here. Normally you would
just use the default value of `-1` which signifies that an anonymous
port should be used. This parameter is always specified in
conjunction with `local-bind-address`. This is a UDP specific
attribute.
- `local-bind-port`. If you want to specify a local port to which the
datagram socket is bound you can specify it here. Normally you would
just use the default value of `-1` which signifies that an anonymous
port should be used. This parameter is always specified in
conjunction with `local-bind-address`. This is a UDP specific
attribute.
- `group-address`. This is the multicast address to which the data
will be broadcast. It is a class D IP address in the range
`224.0.0.0` to `239.255.255.255`, inclusive. The address `224.0.0.0`
is reserved and is not available for use. This parameter is
mandatory. This is a UDP specific attribute.
- `group-address`. This is the multicast address to which the data
will be broadcast. It is a class D IP address in the range
`224.0.0.0` to `239.255.255.255`, inclusive. The address `224.0.0.0`
is reserved and is not available for use. This parameter is
mandatory. This is a UDP specific attribute.
- `group-port`. This is the UDP port number used for broadcasting.
This parameter is mandatory. This is a UDP specific attribute.
- `group-port`. This is the UDP port number used for broadcasting.
This parameter is mandatory. This is a UDP specific attribute.
- `broadcast-period`. This is the period in milliseconds between
consecutive broadcasts. This parameter is optional, the default
value is `2000` milliseconds.
- `broadcast-period`. This is the period in milliseconds between
consecutive broadcasts. This parameter is optional, the default
value is `2000` milliseconds.
- `connector-ref`. This specifies the connector and optional backup
connector that will be broadcasted (see [Configuring the Transport](configuring-transports.md) for more information on
connectors).
- `connector-ref`. This specifies the connector and optional backup
connector that will be broadcasted (see [Configuring the Transport](configuring-transports.md) for more information on
connectors).
Here is another example broadcast group that defines a JGroups broadcast
group:
<broadcast-groups>
<broadcast-group name="my-broadcast-group">
<jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
<jgroups-channel>activemq_broadcast_channel</jgroups-channel>
<broadcast-period>2000</broadcast-period>
<connector-ref>netty-connector</connector-ref>
</broadcast-group>
</broadcast-groups>
```xml
<broadcast-groups>
<broadcast-group name="my-broadcast-group">
<jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
<jgroups-channel>activemq_broadcast_channel</jgroups-channel>
<broadcast-period>2000</broadcast-period>
<connector-ref>netty-connector</connector-ref>
</broadcast-group>
</broadcast-groups>
```
To be able to use JGroups to broadcast, one must specify two attributes,
i.e. `jgroups-file` and `jgroups-channel`, as discussed in detail
below:
- `jgroups-file` attribute. This is the name of JGroups configuration
file. It will be used to initialize JGroups channels. Make sure the
file is in the java resource path so that Apache ActiveMQ Artemis can load it.
- `jgroups-file` attribute. This is the name of JGroups configuration
file. It will be used to initialize JGroups channels. Make sure the
file is in the java resource path so that Apache ActiveMQ Artemis can load it.
- `jgroups-channel` attribute. The name that JGroups channels connect
to for broadcasting.
- `jgroups-channel` attribute. The name that JGroups channels connect
to for broadcasting.
> **Note**
> **Note:**
>
> The JGroups attributes (`jgroups-file` and `jgroups-channel`) and UDP
> specific attributes described above are exclusive of each other. Only
@ -174,57 +176,59 @@ following:
The following is an example of a JGroups file
<config xmlns="urn:org:jgroups"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/JGroups-3.0.xsd">
<TCP loopback="true"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
enable_bundling="true"
use_send_queues="false"
sock_conn_timeout="300"
```xml
<config xmlns="urn:org:jgroups"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/JGroups-3.0.xsd">
<TCP loopback="true"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
enable_bundling="true"
use_send_queues="false"
sock_conn_timeout="300"
thread_pool.enabled="true"
thread_pool.min_threads="1"
thread_pool.max_threads="10"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="false"
thread_pool.queue_max_size="100"
thread_pool.rejection_policy="run"
thread_pool.enabled="true"
thread_pool.min_threads="1"
thread_pool.max_threads="10"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="false"
thread_pool.queue_max_size="100"
thread_pool.rejection_policy="run"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="run"/>
<FILE_PING location="../file.ping.dir"/>
<MERGE2 max_interval="30000"
min_interval="10000"/>
<FD_SOCK/>
<FD timeout="10000" max_tries="5" />
<VERIFY_SUSPECT timeout="1500" />
<BARRIER />
<pbcast.NAKACK
use_mcast_xmit="false"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200" />
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
view_bundling="true"/>
<FC max_credits="2000000"
min_threshold="0.10"/>
<FRAG2 frag_size="60000" />
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
<FILE_PING location="../file.ping.dir"/>
<MERGE2 max_interval="30000"
min_interval="10000"/>
<FD_SOCK/>
<FD timeout="10000" max_tries="5" />
<VERIFY_SUSPECT timeout="1500" />
<BARRIER />
<pbcast.NAKACK
use_mcast_xmit="false"
retransmit_timeout="300,600,1200,2400,4800"
discard_delivered_msgs="true"/>
<UNICAST timeout="300,600,1200" />
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="400000"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
view_bundling="true"/>
<FC max_credits="2000000"
min_threshold="0.10"/>
<FRAG2 frag_size="60000" />
<pbcast.STATE_TRANSFER/>
<pbcast.FLUSH timeout="0"/>
</config>
```
As it shows, the file content defines a JGroups protocol stack. If you
want Apache ActiveMQ Artemis to use this stack for channel creation, you have to make
@ -233,7 +237,9 @@ configuration to be the name of this jgroups configuration file. For
example if the above stacks configuration is stored in a file named
"jgroups-stacks.xml" then your `jgroups-file` should be like
<jgroups-file>jgroups-stacks.xml</jgroups-file>
```xml
<jgroups-file>jgroups-stacks.xml</jgroups-file>
```
#### Discovery Groups
@ -252,18 +258,18 @@ of time it will remove that server's entry from its list.
Discovery groups are used in two places in Apache ActiveMQ Artemis:
- By cluster connections so they know how to obtain an initial
connection to download the topology
- By cluster connections so they know how to obtain an initial
connection to download the topology
- By messaging clients so they know how to obtain an initial
connection to download the topology
- By messaging clients so they know how to obtain an initial
connection to download the topology
Although a discovery group will always accept broadcasts, its current
list of available live and backup servers is only ever used when an
initial connection is made, from then server discovery is done over the
normal Apache ActiveMQ Artemis connections.
> **Note**
> **Note:**
>
> Each discovery group must be configured with broadcast endpoint (UDP
> or JGroups) that matches its broadcast group counterpart. For example,
@ -277,67 +283,71 @@ configuration file `broker.xml`. All discovery groups
must be defined inside a `discovery-groups` element. There can be many
discovery groups defined by Apache ActiveMQ Artemis server. Let's look at an example:
<discovery-groups>
<discovery-group name="my-discovery-group">
<local-bind-address>172.16.9.7</local-bind-address>
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<refresh-timeout>10000</refresh-timeout>
</discovery-group>
</discovery-groups>
```xml
<discovery-groups>
<discovery-group name="my-discovery-group">
<local-bind-address>172.16.9.7</local-bind-address>
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<refresh-timeout>10000</refresh-timeout>
</discovery-group>
</discovery-groups>
```
We'll consider each parameter of the discovery group:
- `name` attribute. Each discovery group must have a unique name per
server.
- `name` attribute. Each discovery group must have a unique name per
server.
- `local-bind-address`. If you are running with multiple network
interfaces on the same machine, you may want to specify that the
discovery group listens only on a specific interface. To do this
you can specify the interface address with this parameter. This
parameter is optional. This is a UDP specific attribute.
- `local-bind-address`. If you are running with multiple network
interfaces on the same machine, you may want to specify that the
discovery group listens only on a specific interface. To do this
you can specify the interface address with this parameter. This
parameter is optional. This is a UDP specific attribute.
- `group-address`. This is the multicast IP address of the group to
listen on. It should match the `group-address` in the broadcast
group that you wish to listen from. This parameter is mandatory.
This is a UDP specific attribute.
- `group-address`. This is the multicast IP address of the group to
listen on. It should match the `group-address` in the broadcast
group that you wish to listen from. This parameter is mandatory.
This is a UDP specific attribute.
- `group-port`. This is the UDP port of the multicast group. It should
match the `group-port` in the broadcast group that you wish to
listen from. This parameter is mandatory. This is a UDP specific
attribute.
- `group-port`. This is the UDP port of the multicast group. It should
match the `group-port` in the broadcast group that you wish to
listen from. This parameter is mandatory. This is a UDP specific
attribute.
- `refresh-timeout`. This is the period the discovery group waits
after receiving the last broadcast from a particular server before
removing that server's connector pair entry from its list. You would
normally set this to a value significantly higher than the
`broadcast-period` on the broadcast group otherwise servers might
intermittently disappear from the list even though they are still
broadcasting due to slight differences in timing. This parameter is
optional, the default value is `10000` milliseconds (10 seconds).
- `refresh-timeout`. This is the period the discovery group waits
after receiving the last broadcast from a particular server before
removing that server's connector pair entry from its list. You would
normally set this to a value significantly higher than the
`broadcast-period` on the broadcast group otherwise servers might
intermittently disappear from the list even though they are still
broadcasting due to slight differences in timing. This parameter is
optional, the default value is `10000` milliseconds (10 seconds).
Here is another example that defines a JGroups discovery group:
<discovery-groups>
<discovery-group name="my-broadcast-group">
<jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
<jgroups-channel>activemq_broadcast_channel</jgroups-channel>
<refresh-timeout>10000</refresh-timeout>
</discovery-group>
</discovery-groups>
```xml
<discovery-groups>
<discovery-group name="my-broadcast-group">
<jgroups-file>test-jgroups-file_ping.xml</jgroups-file>
<jgroups-channel>activemq_broadcast_channel</jgroups-channel>
<refresh-timeout>10000</refresh-timeout>
</discovery-group>
</discovery-groups>
```
To receive broadcast from JGroups channels, one must specify two
attributes, `jgroups-file` and `jgroups-channel`, as discussed in
detail below:
- `jgroups-file` attribute. This is the name of JGroups configuration
file. It will be used to initialize JGroups channels. Make sure the
file is in the java resource path so that Apache ActiveMQ Artemis can load it.
- `jgroups-file` attribute. This is the name of JGroups configuration
file. It will be used to initialize JGroups channels. Make sure the
file is in the java resource path so that Apache ActiveMQ Artemis can load it.
- `jgroups-channel` attribute. The name that JGroups channels connect
to for receiving broadcasts.
- `jgroups-channel` attribute. The name that JGroups channels connect
to for receiving broadcasts.
> **Note**
> **Note:**
>
> The JGroups attributes (`jgroups-file` and `jgroups-channel`) and UDP
> specific attributes described above are exclusive of each other. Only
@ -355,7 +365,9 @@ differs depending on whether you're using JMS or the core API.
Use the `udp` URL scheme and a host:port combination matches the group-address and
group-port from the corresponding `broadcast-group` on the server:
udp://231.7.7.7:9876
```
udp://231.7.7.7:9876
```
The element `discovery-group-ref` specifies the name of a discovery
group defined in `broker.xml`.
@ -402,7 +414,9 @@ A static list of possible servers can also be used by a normal client.
A list of servers to be used for the initial connection attempt can be
specified in the connection URI using a syntax with `()`, e.g.:
(tcp://myhost:61616,tcp://myhost2:61616)?reconnectAttempts=5
```
(tcp://myhost:61616,tcp://myhost2:61616)?reconnectAttempts=5
```
The brackets are expanded so the same query can be appended after the last
bracket for ease.
@ -453,219 +467,223 @@ typical cluster connection. Cluster connections are always defined in
There can be zero or more cluster connections defined per Apache ActiveMQ Artemis
server.
<cluster-connections>
<cluster-connection name="my-cluster">
<address></address>
<connector-ref>netty-connector</connector-ref>
<check-period>1000</check-period>
<connection-ttl>5000</connection-ttl>
<min-large-message-size>50000</min-large-message-size>
<call-timeout>5000</call-timeout>
<retry-interval>500</retry-interval>
<retry-interval-multiplier>1.0</retry-interval-multiplier>
<max-retry-interval>5000</max-retry-interval>
<initial-connect-attempts>-1</initial-connect-attempts>
<reconnect-attempts>-1</reconnect-attempts>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>ON_DEMAND</message-load-balancing>
<max-hops>1</max-hops>
<confirmation-window-size>32000</confirmation-window-size>
<call-failover-timeout>30000</call-failover-timeout>
<notification-interval>1000</notification-interval>
<notification-attempts>2</notification-attempts>
<discovery-group-ref discovery-group-name="my-discovery-group"/>
</cluster-connection>
</cluster-connections>
```xml
<cluster-connections>
<cluster-connection name="my-cluster">
<address></address>
<connector-ref>netty-connector</connector-ref>
<check-period>1000</check-period>
<connection-ttl>5000</connection-ttl>
<min-large-message-size>50000</min-large-message-size>
<call-timeout>5000</call-timeout>
<retry-interval>500</retry-interval>
<retry-interval-multiplier>1.0</retry-interval-multiplier>
<max-retry-interval>5000</max-retry-interval>
<initial-connect-attempts>-1</initial-connect-attempts>
<reconnect-attempts>-1</reconnect-attempts>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>ON_DEMAND</message-load-balancing>
<max-hops>1</max-hops>
<confirmation-window-size>32000</confirmation-window-size>
<call-failover-timeout>30000</call-failover-timeout>
<notification-interval>1000</notification-interval>
<notification-attempts>2</notification-attempts>
<discovery-group-ref discovery-group-name="my-discovery-group"/>
</cluster-connection>
</cluster-connections>
```
In the above cluster connection all parameters have been explicitly
specified. The following shows all the available configuration options
- `address` Each cluster connection only applies to addresses that
match the specified `address` field. An address is matched on the
cluster connection when it begins with the string specified in this
field. The `address` field on a cluster connection also supports comma
separated lists and an exclude syntax `!`. To prevent an address
from being matched on this cluster connection, prepend a cluster
connection address string with `!`.
- `address` Each cluster connection only applies to addresses that
match the specified `address` field. An address is matched on the
cluster connection when it begins with the string specified in this
field. The `address` field on a cluster connection also supports comma
separated lists and an exclude syntax `!`. To prevent an address
from being matched on this cluster connection, prepend a cluster
connection address string with `!`.
In the case shown above the cluster connection will load balance
messages sent to all addresses (since it's empty).
In the case shown above the cluster connection will load balance
messages sent to all addresses (since it's empty).
The address can be any value and you can have many cluster
connections with different values of `address`, simultaneously
balancing messages for those addresses, potentially to different
clusters of servers. By having multiple cluster connections on
different addresses a single Apache ActiveMQ Artemis Server can effectively take
part in multiple clusters simultaneously.
The address can be any value and you can have many cluster
connections with different values of `address`, simultaneously
balancing messages for those addresses, potentially to different
clusters of servers. By having multiple cluster connections on
different addresses a single Apache ActiveMQ Artemis Server can effectively take
part in multiple clusters simultaneously.
Be careful not to have multiple cluster connections with overlapping
values of `address`, e.g. "europe" and "europe.news" since this
could result in the same messages being distributed between more
than one cluster connection, possibly resulting in duplicate
deliveries.
Be careful not to have multiple cluster connections with overlapping
values of `address`, e.g. "europe" and "europe.news" since this
could result in the same messages being distributed between more
than one cluster connection, possibly resulting in duplicate
deliveries.
Examples:
Examples:
- 'eu'
matches all addresses starting with 'eu'
- '!eu'
matches all addresses except for those starting with 'eu'
- 'eu.uk,eu.de'
matches all addresses starting with either 'eu.uk' or
'eu.de'
- 'eu,!eu.uk'
matches all addresses starting with 'eu' but not those
starting with 'eu.uk'
- 'eu'
matches all addresses starting with 'eu'
- '!eu'
matches all addresses except for those starting with 'eu'
- 'eu.uk,eu.de'
matches all addresses starting with either 'eu.uk' or
'eu.de'
- 'eu,!eu.uk'
matches all addresses starting with 'eu' but not those
starting with 'eu.uk'
Notes:
**Note:**
- Address exclusion will always take precedence over address
inclusion.
- Address matching on cluster connections does not support
wild-card matching.
- Address exclusion will always take precedence over address
inclusion.
- Address matching on cluster connections does not support
wild-card matching.
- `connector-ref`. This is the connector which will be sent to other
nodes in the cluster so they have the correct cluster topology.
- `connector-ref`. This is the connector which will be sent to other
nodes in the cluster so they have the correct cluster topology.
This parameter is mandatory.
This parameter is mandatory.
- `check-period`. The period (in milliseconds) used to check if the
cluster connection has failed to receive pings from another server.
Default is 30000.
- `check-period`. The period (in milliseconds) used to check if the
cluster connection has failed to receive pings from another server.
Default is 30000.
- `connection-ttl`. This is how long a cluster connection should stay
alive if it stops receiving messages from a specific node in the
cluster. Default is 60000.
- `connection-ttl`. This is how long a cluster connection should stay
alive if it stops receiving messages from a specific node in the
cluster. Default is 60000.
- `min-large-message-size`. If the message size (in bytes) is larger
than this value then it will be split into multiple segments when
sent over the network to other cluster members. Default is 102400.
- `min-large-message-size`. If the message size (in bytes) is larger
than this value then it will be split into multiple segments when
sent over the network to other cluster members. Default is 102400.
- `call-timeout`. When a packet is sent via a cluster connection and
is a blocking call, i.e. for acknowledgements, this is how long it
will wait (in milliseconds) for the reply before throwing an
exception. Default is 30000.
- `call-timeout`. When a packet is sent via a cluster connection and
is a blocking call, i.e. for acknowledgements, this is how long it
will wait (in milliseconds) for the reply before throwing an
exception. Default is 30000.
- `retry-interval`. We mentioned before that, internally, cluster
connections cause bridges to be created between the nodes of the
cluster. If the cluster connection is created and the target node
has not been started, or say, is being rebooted, then the cluster
connections from other nodes will retry connecting to the target
until it comes back up, in the same way as a bridge does.
- `retry-interval`. We mentioned before that, internally, cluster
connections cause bridges to be created between the nodes of the
cluster. If the cluster connection is created and the target node
has not been started, or say, is being rebooted, then the cluster
connections from other nodes will retry connecting to the target
until it comes back up, in the same way as a bridge does.
This parameter determines the interval in milliseconds between retry
attempts. It has the same meaning as the `retry-interval` on a
bridge (as described in [Core Bridges](core-bridges.md)).
This parameter determines the interval in milliseconds between retry
attempts. It has the same meaning as the `retry-interval` on a
bridge (as described in [Core Bridges](core-bridges.md)).
This parameter is optional and its default value is `500`
milliseconds.
This parameter is optional and its default value is `500`
milliseconds.
- `retry-interval-multiplier`. This is a multiplier used to increase
the `retry-interval` after each reconnect attempt, default is 1.
- `retry-interval-multiplier`. This is a multiplier used to increase
the `retry-interval` after each reconnect attempt, default is 1.
- `max-retry-interval`. The maximum delay (in milliseconds) for
retries. Default is 2000.
- `max-retry-interval`. The maximum delay (in milliseconds) for
retries. Default is 2000.
- `initial-connect-attempts`. The number of times the system will try
to connect a node in the cluster initially. If the max-retry is
achieved this node will be considered permanently down and the
system will not route messages to this node. Default is -1 (infinite
retries).
- `initial-connect-attempts`. The number of times the system will try
to connect a node in the cluster initially. If the max-retry is
achieved this node will be considered permanently down and the
system will not route messages to this node. Default is -1 (infinite
retries).
- `reconnect-attempts`. The number of times the system will try to
reconnect to a node in the cluster. If the max-retry is achieved
this node will be considered permanently down and the system will
stop routing messages to this node. Default is -1 (infinite
retries).
- `reconnect-attempts`. The number of times the system will try to
reconnect to a node in the cluster. If the max-retry is achieved
this node will be considered permanently down and the system will
stop routing messages to this node. Default is -1 (infinite
retries).
- `use-duplicate-detection`. Internally cluster connections use
bridges to link the nodes, and bridges can be configured to add a
duplicate id property in each message that is forwarded. If the
target node of the bridge crashes and then recovers, messages might
be resent from the source node. By enabling duplicate detection any
duplicate messages will be filtered out and ignored on receipt at
the target node.
- `use-duplicate-detection`. Internally cluster connections use
bridges to link the nodes, and bridges can be configured to add a
duplicate id property in each message that is forwarded. If the
target node of the bridge crashes and then recovers, messages might
be resent from the source node. By enabling duplicate detection any
duplicate messages will be filtered out and ignored on receipt at
the target node.
This parameter has the same meaning as `use-duplicate-detection` on
a bridge. For more information on duplicate detection, please see [Duplicate Detection](duplicate-detection.md).
Default is true.
This parameter has the same meaning as `use-duplicate-detection` on
a bridge. For more information on duplicate detection, please see [Duplicate Detection](duplicate-detection.md).
Default is true.
- `message-load-balancing`. This parameter determines if/how
messages will be distributed between other nodes of the cluster.
It can be one of three values - `OFF`, `STRICT`, or `ON_DEMAND`
(default). This parameter replaces the deprecated
`forward-when-no-consumers` parameter.
If this is set to `OFF` then messages will never be forwarded to
another node in the cluster
- `message-load-balancing`. This parameter determines if/how
messages will be distributed between other nodes of the cluster.
It can be one of three values - `OFF`, `STRICT`, or `ON_DEMAND`
(default). This parameter replaces the deprecated
`forward-when-no-consumers` parameter.
If this is set to `OFF` then messages will never be forwarded to
another node in the cluster
If this is set to `STRICT` then each incoming message will be round
robin'd even though the same queues on the other nodes of the
cluster may have no consumers at all, or they may have consumers
that have non matching message filters (selectors). Note that
Apache ActiveMQ Artemis will *not* forward messages to other nodes
if there are no *queues* of the same name on the other nodes, even
if this parameter is set to `STRICT`. Using `STRICT` is like setting
the legacy `forward-when-no-consumers` parameter to `true`.
If this is set to `STRICT` then each incoming message will be round
robin'd even though the same queues on the other nodes of the
cluster may have no consumers at all, or they may have consumers
that have non matching message filters (selectors). Note that
Apache ActiveMQ Artemis will *not* forward messages to other nodes
if there are no *queues* of the same name on the other nodes, even
if this parameter is set to `STRICT`. Using `STRICT` is like setting
the legacy `forward-when-no-consumers` parameter to `true`.
If this is set to `ON_DEMAND` then Apache ActiveMQ Artemis will only
forward messages to other nodes of the cluster if the address to which
they are being forwarded has queues which have consumers, and if those
consumers have message filters (selectors) at least one of those
selectors must match the message. Using `ON_DEMAND` is like setting
the legacy `forward-when-no-consumers` parameter to `false`.
If this is set to `ON_DEMAND` then Apache ActiveMQ Artemis will only
forward messages to other nodes of the cluster if the address to which
they are being forwarded has queues which have consumers, and if those
consumers have message filters (selectors) at least one of those
selectors must match the message. Using `ON_DEMAND` is like setting
the legacy `forward-when-no-consumers` parameter to `false`.
Default is `ON_DEMAND`.
Default is `ON_DEMAND`.
- `max-hops`. When a cluster connection decides the set of nodes to
which it might load balance a message, those nodes do not have to be
directly connected to it via a cluster connection. Apache ActiveMQ Artemis can be
configured to also load balance messages to nodes which might be
connected to it only indirectly with other Apache ActiveMQ Artemis servers as
intermediates in a chain.
- `max-hops`. When a cluster connection decides the set of nodes to
which it might load balance a message, those nodes do not have to be
directly connected to it via a cluster connection. Apache ActiveMQ Artemis can be
configured to also load balance messages to nodes which might be
connected to it only indirectly with other Apache ActiveMQ Artemis servers as
intermediates in a chain.
This allows Apache ActiveMQ Artemis to be configured in more complex topologies and
still provide message load balancing. We'll discuss this more later
in this chapter.
This allows Apache ActiveMQ Artemis to be configured in more complex topologies and
still provide message load balancing. We'll discuss this more later
in this chapter.
The default value for this parameter is `1`, which means messages
are only load balanced to other Apache ActiveMQ Artemis servers which are directly
connected to this server. This parameter is optional.
The default value for this parameter is `1`, which means messages
are only load balanced to other Apache ActiveMQ Artemis servers which are directly
connected to this server. This parameter is optional.
- `confirmation-window-size`. The size (in bytes) of the window used
for sending confirmations from the server connected to. So once the
server has received `confirmation-window-size` bytes it notifies its
client, default is 1048576. A value of -1 means no window.
- `confirmation-window-size`. The size (in bytes) of the window used
for sending confirmations from the server connected to. So once the
server has received `confirmation-window-size` bytes it notifies its
client, default is 1048576. A value of -1 means no window.
- `producer-window-size`. The size for producer flow control over cluster connection.
It is disabled by default through the cluster connection bridge but you may want
to set a value if you are using really large messages in cluster. A value of -1 means no window.
- `producer-window-size`. The size for producer flow control over cluster connection.
It is disabled by default through the cluster connection bridge but you may want
to set a value if you are using really large messages in cluster. A value of -1 means no window.
- `call-failover-timeout`. Similar to `call-timeout` but used when a
call is made during a failover attempt. Default is -1 (no timeout).
- `call-failover-timeout`. Similar to `call-timeout` but used when a
call is made during a failover attempt. Default is -1 (no timeout).
- `notification-interval`. How often (in milliseconds) the cluster
connection should broadcast itself when attaching to the cluster.
Default is 1000.
- `notification-interval`. How often (in milliseconds) the cluster
connection should broadcast itself when attaching to the cluster.
Default is 1000.
- `notification-attempts`. How many times the cluster connection
should broadcast itself when connecting to the cluster. Default is
2.
- `notification-attempts`. How many times the cluster connection
should broadcast itself when connecting to the cluster. Default is
2.
- `discovery-group-ref`. This parameter determines which discovery
group is used to obtain the list of other servers in the cluster
that this cluster connection will make connections to.
- `discovery-group-ref`. This parameter determines which discovery
group is used to obtain the list of other servers in the cluster
that this cluster connection will make connections to.
Alternatively if you would like your cluster connections to use a static
list of servers for discovery then you can do it like this.
<cluster-connection name="my-cluster">
...
<static-connectors>
<connector-ref>server0-connector</connector-ref>
<connector-ref>server1-connector</connector-ref>
</static-connectors>
</cluster-connection>
```xml
<cluster-connection name="my-cluster">
...
<static-connectors>
<connector-ref>server0-connector</connector-ref>
<connector-ref>server1-connector</connector-ref>
</static-connectors>
</cluster-connection>
```
Here we have defined 2 servers where we know for sure that at least
one will be available. There may be many more servers in the cluster but
@ -678,8 +696,10 @@ When creating connections between nodes of a cluster to form a cluster
connection, Apache ActiveMQ Artemis uses a cluster user and cluster password which is
defined in `broker.xml`:
<cluster-user>ACTIVEMQ.CLUSTER.ADMIN.USER</cluster-user>
<cluster-password>CHANGE ME!!</cluster-password>
```xml
<cluster-user>ACTIVEMQ.CLUSTER.ADMIN.USER</cluster-user>
<cluster-password>CHANGE ME!!</cluster-password>
```
> **Warning**
>
@ -701,35 +721,35 @@ policies, and you can also implement your own and use that.
The out-of-the-box policies are
- Round Robin. With this policy the first node is chosen randomly then
each subsequent node is chosen sequentially in the same order.
- Round Robin. With this policy the first node is chosen randomly then
each subsequent node is chosen sequentially in the same order.
For example nodes might be chosen in the order B, C, D, A, B, C, D,
A, B or D, A, B, C, D, A, B, C, D or C, D, A, B, C, D, A, B, C.
For example nodes might be chosen in the order B, C, D, A, B, C, D,
A, B or D, A, B, C, D, A, B, C, D or C, D, A, B, C, D, A, B, C.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RoundRobinConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RoundRobinConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
- Random. With this policy each node is chosen randomly.
- Random. With this policy each node is chosen randomly.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RandomConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RandomConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
- Random Sticky. With this policy the first node is chosen randomly
and then re-used for subsequent connections.
- Random Sticky. With this policy the first node is chosen randomly
and then re-used for subsequent connections.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RandomStickyConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.RandomStickyConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
- First Element. With this policy the "first" (i.e. 0th) node is
always returned.
- First Element. With this policy the "first" (i.e. 0th) node is
always returned.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.FirstElementConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
Use
`org.apache.activemq.artemis.api.core.client.loadbalance.FirstElementConnectionLoadBalancingPolicy`
as the `<connection-load-balancing-policy-class-name>`.
You can also implement your own policy by implementing the interface
`org.apache.activemq.artemis.api.core.client.loadbalance.ConnectionLoadBalancingPolicy`
@ -742,15 +762,17 @@ default will be used which is
The parameter `connectionLoadBalancingPolicyClassName` can be set on the URI to
configure what load balancing policy to use:
tcp://localhost:61616?connectionLoadBalancingPolicyClassName=org.apache.activemq.artemis.api.core.client.loadbalance.RandomConnectionLoadBalancingPolicy
```
tcp://localhost:61616?connectionLoadBalancingPolicyClassName=org.apache.activemq.artemis.api.core.client.loadbalance.RandomConnectionLoadBalancingPolicy
```
The set of servers over which the factory load balances can be
determined in one of two ways:
- Specifying servers explicitly in the URL. This also requires setting
the `useTopologyForLoadBalancing` parameter to `false` on the URL.
- Specifying servers explicitly in the URL. This also requires setting
the `useTopologyForLoadBalancing` parameter to `false` on the URL.
- Using discovery. This is the default behavior.
- Using discovery. This is the default behavior.
## Specifying Members of a Cluster Explicitly
@ -760,17 +782,19 @@ typically used to form non symmetrical clusters such as chain cluster or
ring clusters. This can only be done using a static list of connectors
and is configured as follows:
<cluster-connection name="my-cluster">
<address>jms</address>
<connector-ref>netty-connector</connector-ref>
<retry-interval>500</retry-interval>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>STRICT</message-load-balancing>
<max-hops>1</max-hops>
<static-connectors allow-direct-connections-only="true">
<connector-ref>server1-connector</connector-ref>
</static-connectors>
</cluster-connection>
```xml
<cluster-connection name="my-cluster">
<address/>
<connector-ref>netty-connector</connector-ref>
<retry-interval>500</retry-interval>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>STRICT</message-load-balancing>
<max-hops>1</max-hops>
<static-connectors allow-direct-connections-only="true">
<connector-ref>server1-connector</connector-ref>
</static-connectors>
</cluster-connection>
```
In this example we have set the attribute
`allow-direct-connections-only` which means that the only server that
@ -808,11 +832,13 @@ information on configuring address settings, please see [Configuring Addresses a
Here's an address settings snippet from `broker.xml`
showing how message redistribution is enabled for a set of queues:
<address-settings>
<address-setting match="#">
<redistribution-delay>0</redistribution-delay>
</address-setting>
</address-settings>
```xml
<address-settings>
<address-setting match="#">
<redistribution-delay>0</redistribution-delay>
</address-setting>
</address-settings>
```
The above `address-settings` block would set a `redistribution-delay` of
`0` for any queue which is bound to any address. So the above would enable

View File

@ -1,8 +1,11 @@
# Configuration Reload
The system will perform a periodic check on the configuration files, configured by `configuration-file-refresh-period`, with the default at 5000, in milliseconds.
The system will perform a periodic check on the configuration files, configured
by `configuration-file-refresh-period`, with the default at 5000, in
milliseconds.
Once the configuration file is changed (broker.xml) the following modules will be reloaded automatically:
Once the configuration file is changed (broker.xml) the following modules will
be reloaded automatically:
- Address Settings
- Security Settings
@ -10,7 +13,7 @@ Once the configuration file is changed (broker.xml) the following modules will b
- Addresses & queues
Notice:
**Note:**
Deletion of Address's and Queue's, not auto created is controlled by Address Settings
@ -23,50 +26,63 @@ Deletion of Address's and Queue's, not auto created is controlled by Address Set
* FORCE - will remove the queue upon config reload, even if messages remains, losing the messages in the queue.
By default both settings are OFF as such address & queues won't be removed upon reload, given the risk of losing messages.
By default both settings are OFF as such address & queues won't be removed upon
reload, given the risk of losing messages.
When OFF You may execute explicit CLI or Management operations to remove address & queues.
When OFF You may execute explicit CLI or Management operations to remove
address & queues.
## Reloadable Parameters
The broker configuration file has 2 main parts, `<core>` and `<jms>`. Some of the parameters in the 2 parts are monitored and,
if modified, reloaded into the broker at runtime.
The broker configuration file has 2 main parts, `<core>` and `<jms>`. Some of
the parameters in the 2 parts are monitored and, if modified, reloaded into the
broker at runtime.
Please note that elements under `<jms>` are deprecated. Users are encouraged to use `<core>` configuration entities.
**Note:** Elements under `<jms>` are **deprecated**. Users are encouraged to
use `<core>` configuration entities.
> *Note:*
> Most parameters reloaded take effect immediately after reloading. However there are some
> that wont take any effect unless you restarting the broker.
> **Note:**
>
> Most parameters reloaded take effect immediately after reloading. However
> there are some that won't take any effect unless you restart the broker.
> Such parameters are specifically indicated in the following text.
### `<core>`
### `<core>`
#### `<security-settings>`
* `<security-setting>` element
Changes to any <security-setting> elements will be reloaded. Each <security-setting> defines security roles for a matched address.
Changes to any `<security-setting>` elements will be reloaded. Each
`<security-setting>` defines security roles for a matched address.
* The `match` attribute
* The `match` attribute
This attribute defines the address for which the security-setting is defined. It can take wildcards such as # and *.
This attribute defines the address for which the security-setting is
defined. It can take wildcards such as # and *.
* The `<permission>` sub-elements
* The `<permission>` sub-elements
Each `<security-setting>` can have a list of `<permission>` elements, each of which defines a specific permission-roles mapping.
Each permission has 2 attributes type and roles. The type attribute defines the type of operation allowed, the roles
defines which roles are allowed to perform such operation. Refer to the users manual for a list of operations that can be defined.
Each `<security-setting>` can have a list of `<permission>` elements, each
of which defines a specific permission-roles mapping. Each permission has 2
attributes type and roles. The type attribute defines the type of
operation allowed, the roles defines which roles are allowed to perform such
operation. Refer to the users manual for a list of operations that can be
defined.
> *Note:*
> Once loaded the security-settings will take effect immediately. Any new clients will subject
> to the new security settings. Any existing clients will subject to the new settings as well, as soon as they performs
> a new security-sensitive operation.
> **Note:**
>
> Once loaded the security-settings will take effect immediately. Any new
> clients will be subject to the new security settings. Any existing clients
> will be subject to the new settings as well, as soon as they perform a new
> security-sensitive operation.
Below lists the effects of adding, deleting and updating of an element/attribute within the `<security-settings>` element, whether
an change can be done or cant be done.
Below lists the effects of adding, deleting and updating of an
element/attribute within the `<security-settings>` element, and whether a
change can or cannot be done.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<security-settings>` | X* (at most one element is allowed) | Deleting it means delete the whole security settings from the running broker. | N/A*
`<security-setting>` | Adding one element means adding a new set of security roles for an address in the running broker | Deleting one element means removing a set of security roles for an address in the running broker | Updating one element means updating the security roles for an address (if match attribute is not changed), or means removing the old match address settings and adding a new one (if match attribute is changed)
attribute `match` | N/A* | X* | Changing this value is same as deleting the whole <security-setting> with the old match value and adding
@ -81,21 +97,28 @@ attribute `roles` | N/A* | X* | Changing the roles value means updating th
* `<address-settings>` element
Changes to elements under `<address-settings>` will be reloaded into runtime broker. It contains a list of `<address-setting>` elements.
Changes to elements under `<address-settings>` will be reloaded into runtime
broker. It contains a list of `<address-setting>` elements.
* `<address-setting>` element
Each address-setting element has a match attribute that defines an address pattern for which this address-setting is defined. It also has a list of sub-elements used to define the properties of a matching address.
> *Note:*
> Parameters reloaded in this category will take effect immediately after reloading. The effect of deletion of Address's and Queue's,
> not auto created is controlled by parameter `config-delete-addresses` and `config-delete-queues` as described in the doc.
Below lists the effects of adding, deleting and updating of an element/attribute within the address-settings element, whether an change
can be done or cant be done.
Each address-setting element has a match attribute that defines an address
pattern for which this address-setting is defined. It also has a list of
sub-elements used to define the properties of a matching address.
> **Note:**
>
> Parameters reloaded in this category will take effect immediately
> after reloading. The effect of deletion of Address's and Queue's, not auto
> created is controlled by parameter `config-delete-addresses` and
> `config-delete-queues` as described in the doc.
Below lists the effects of adding, deleting and updating of an
element/attribute within the address-settings element, and whether a change
can or cannot be done.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<address-settings>` | X(at most one element is allowed) | Deleting it means delete the whole address settings from the running broker | N/A
`<address-setting>` | Adding one element means adding a set of address-setting for a new address in the running broker | Deleting one means removing a set of address-setting for an address in the running broker | Updating one element means updating the address setting for an address (if match attribute is not changed), or means removing the old match address settings and adding a new one (if match attribute is changed)
attribute `match` | N/A | X | Changing this value is same as deleting the whole <address-setting> with the old match value and adding a new one with the new match value.
@ -132,20 +155,22 @@ attribute `match` | N/A | X | Changing this value is same as deleting the whole
#### `<diverts>`
All `<divert>` elements will be reloaded. Each `<divert>` element
has a name and several sub-elements that defines the properties of a divert.
All `<divert>` elements will be reloaded. Each `<divert>` element has a name
and several sub-elements that defines the properties of a divert.
> *Note:*
> Reloading `<diverts>` only resulting in deploying new diverts. Existing diverts
> wont get undeployed even if you delete a `<divert>` element. Nor an existing
> divert will be updated if its element is updated after reloading.
> To make this happen you need a restart of the broker.
> **Note:**
>
> Reloading `<diverts>` only results in deploying new diverts. Existing
> diverts won't get undeployed even if you delete a `<divert>` element. Nor
> will an existing divert be updated if its element is updated after
> reloading. To make this happen you need a restart of the broker.
Below lists the effects of adding, deleting and updating of an element/attribute
within the diverts element, whether an change can be done or cant be done.
Below lists the effects of adding, deleting and updating of an
element/attribute within the diverts element, and whether a change can or
cannot be done.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<diverts>` | X (no more than one can be present) | Deleting it means delete (undeploy) all diverts in running broker. | N/A
`<divert>` | Adding a new divert. It will be deployed after reloading | No effect on the deployed divert.(unless restarting broker, in which case the divert will no longer be deployed) | No effect on the deployed divert (unless restarting broker, in which case the divert will be redeployed)
attribute `name` | N/A | X | A new divert with the name will be deployed. (if it is not already there in broker). Otherwise no effect.
@ -159,23 +184,28 @@ attribute `name` | N/A | X | A new divert with the name will be deployed. (if it
#### `<addresses>`
The `<addresses>` element contains a list `<address>` elements. Once changed, all `<address>` elements
in `<addresses>` will be reloaded.
The `<addresses>` element contains a list of `<address>` elements. Once
changed, all `<address>` elements in `<addresses>` will be reloaded.
> *Note:*
> Once reloaded, all new addresses (as well as the pre-configured queues) will be
> deployed to the running broker and all those that are missing from the configuration will be undeployed.
> **Note:**
>
> Once reloaded, all new addresses (as well as the pre-configured queues) will
> be deployed to the running broker and all those that are missing from the
> configuration will be undeployed.
> *Note:*
> Parameters reloaded in this category will take effect immediately after reloading.
> The effect of deletion of Address's and Queue's, not auto created is controlled by
> parameter `config-delete-addresses` and `config-delete-queues` as described in this doc.
> **Note:**
>
> Parameters reloaded in this category will take effect immediately after
> reloading. The effect of deletion of Address's and Queue's, not auto created
> is controlled by parameter `config-delete-addresses` and
> `config-delete-queues` as described in this doc.
Below lists the effects of adding, deleting and updating of an element/attribute
within the `<addresses>` element, whether an change can be done or cant be done.
Below lists the effects of adding, deleting and updating of an
element/attribute within the `<addresses>` element, and whether a change can
or cannot be done.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<addresses>` | X(no more than one is present) | Deleting it means delete (undeploy) all addresses in running broker. | N/A
`<address>` | A new address will be deployed in the running broker | The corresponding address will be undeployed. | N/A
attribute `name` | N/A | X | After reloading the address of the old name will be undeployed and the new will be deployed.
@ -186,21 +216,27 @@ attribute `name` | N/A | X | After reloading the address of the old name will be
#### `<queues>`
The `<queues>` element contains a list `<queue>` elements. Once changed, all `<queue>` elements in `<queues>` will be reloaded.
The `<queues>` element contains a list of `<queue>` elements. Once changed,
all `<queue>` elements in `<queues>` will be reloaded.
> *Note:*
> Once reloaded, all new queues will be deployed to the running broker and all
> **Note:**
>
> Once reloaded, all new queues will be deployed to the running broker and all
> queues that are missing from the configuration will be undeployed.
> *Note:*
> Parameters reloaded in this category will take effect immediately after reloading.
> The effect of deletion of Address's and Queue's, not auto created is controlled by
> parameter `config-delete-addresses` and `config-delete-queues` as described in this doc.
Below lists the effects of adding, deleting and updating of an element/attribute within the `<queues>` element,
and whether an change can be done or cant be done.
> **Note:**
>
> Parameters reloaded in this category will take effect immediately after
> reloading. The effect of deletion of Address's and Queue's, not auto created
> is controlled by parameter `config-delete-addresses` and
> `config-delete-queues` as described in this doc.
Below lists the effects of adding, deleting and updating of an
element/attribute within the `<queues>` element, and whether a change can or
cannot be done.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<queues>` | X(no more than one is present) | Deleting it means delete (undeploy) all queues from running broker. | N/A
`<queue>` | A new queue is deployed after reloading | The queue will be undeployed after reloading. | N/A
attribute `name` | N/A | X | A queue with the new name will be deployed and the queue with the old name will be undeployed after reloading (see Note above).
@ -214,15 +250,17 @@ attribute `durable` | N/A | No effect unless starting broker | No effect unless
#### `<queue>`
Changes to any `<queue>` elements will be reloaded to the running broker.
Changes to any `<queue>` elements will be reloaded to the running broker.
> *Note:*
> Once reloaded, new queues defined in the new changes will be deployed to the running
> broker. However existing queues wont get undeployed even if the matching element is
> deleted/missing. Also new queue elements matching existing queues wont get re-created they remain unchanged.
> **Note:**
>
> Once reloaded, new queues defined in the new changes will be deployed to the
> running broker. However, existing queues won't get undeployed even if the
> matching element is deleted/missing. Also, new queue elements matching
> existing queues won't get re-created — they remain unchanged.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<queue>` | A new jms queue will be deployed after reloading | No effect unless starting broker | No effect unless starting broker
attribute `<name>` | N/A | X | A jms queue of the new name will be deployed after reloading
`<selector>` | X(no more than one is present) | No effect unless starting broker | No effect unless starting broker
@ -230,15 +268,16 @@ attribute `<name>` | N/A | X | A jms queue of the new name will be deployed afte
#### `<topic>`
Changes to any `<topic>` elements will be reloaded to the running broker.
Changes to any `<topic>` elements will be reloaded to the running broker.
> *Note:*
> Once reloaded, new topics defined in the new changes will be deployed to
> the running broker. However existing topics wont get undeployed even if the
> **Note:**
>
> Once reloaded, new topics defined in the new changes will be deployed to the
> running broker. However, existing topics won't get undeployed even if the
> matching element is deleted/missing. Also, any `<topic>` elements matching
> existing topics won't get re-deployed — they remain unchanged.
Operation | Add | Delete | Update
:--- | :--- | :--- | :---
---|---|---|---
`<topic>` | A new jms topic will be deployed after reloading | No effect unless starting broker | No effect unless starting broker
attribute `name` | N/A | X | A jms topic of the new name will be deployed after reloading

View File

@ -1,51 +1,29 @@
Configuration Reference
=======================
# Configuration Reference
This section is a quick index for looking up configuration. Click on the
element name to go to the specific chapter.
Server Configuration
====================
## Broker Configuration
broker.xml
--------------------------
### broker.xml
This is the main core server configuration file which contains the 'core'
element.
The 'core' element contains the main server configuration.
This is the main core server configuration file which contains the `core`
element. The `core` element contains the main server configuration.
# System properties
#### Modularising broker.xml
It is possible to use System properties to replace some of the configuration properties. If you define a System property starting with "brokerconfig." that will be passed along to Bean Utils and the configuration would be replaced.
To define global-max-size=1000000 using a system property you would have to define this property, for example through java arguments:
```
java -Dbrokerconfig.globalMaxSize=1000000
```
You can also change the prefix through the broker.xml by setting:
```
<system-property-prefix>yourprefix</system-property-prefix>
```
This is to help you customize artemis on embedded systems.
# Modularising config into separate files.
XML XInclude support is provided in the configuration as such if you wish to break your configuration out into separate files you can.
XML XInclude support is provided in `broker.xml` so that you can break your configuration out into separate files.
To do this ensure the following is defined at the root configuration element.
```
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xi="http://www.w3.org/2001/XInclude"
```
You can now define include tags where you want to bring in XML configuration from another file:
```
<xi:include href="my-address-settings.xml"/>
<xi:include href="my-address-settings.xml"/>
```
You should ensure that XML elements in separated files are namespaced correctly. For example, if the address-settings element was separated, it should have the element namespace defined:
@ -55,241 +33,338 @@ You should ensure xml elements in separated files should be namespaced correctly
```
An example of this feature can be seen in the test suites:
```
./artemis-server/src/test/resources/ConfigurationTest-xinclude-config.xml
```
N.B. if you use xmllint to validate xml's against schema you should enable xinclude flag when running.
./artemis-server/src/test/resources/ConfigurationTest-xinclude-config.xml
```
**Note:** if you use `xmllint` to validate the XML against the schema you should enable the xinclude flag when running.
```
--xinclude
--xinclude
```
For further information on XInclude see:
[https://www.w3.org/TR/xinclude/](https://www.w3.org/TR/xinclude/)
[https://www.w3.org/TR/xinclude/](https://www.w3.org/TR/xinclude/)
### System properties
It is possible to use System properties to replace some of the configuration properties. If you define a System property starting with "brokerconfig." it will be passed along to Bean Utils and the corresponding configuration value will be replaced.
To define global-max-size=1000000 using a system property you would have to define this property, for example through java arguments:
```
java -Dbrokerconfig.globalMaxSize=1000000
```
You can also change the prefix through the `broker.xml` by setting:
```
<system-property-prefix>yourprefix</system-property-prefix>
```
This is to help you customize artemis on embedded systems.
# The core configuration
## The core configuration
This describes the root of the XML configuration. Multiple sub-types are also listed here.
For example, the main config contains bridges, and the [list of bridge](#bridge-type) type describes the properties for that configuration.
Name | Description
:--- | :---
[acceptors](configuring-transports.md "Understanding Acceptors") | a list of remoting acceptors
[acceptors.acceptor](configuring-transports.md "Understanding Acceptors") | Each acceptor is composed for just an URL
[address-settings](address-model.md "Configuring Addresses and Queues Via Address Settings") | [a list of address-setting](#address-setting-type)
[allow-failback](ha.md "Failing Back to live Server") | Should stop backup on live restart. default true
[amqp-use-core-subscription-naming](using-AMQP.md "Message Conversions") | If true uses CORE queue naming convention for AMQP. default false
[async-connection-execution-enabled](connection-ttl.md "Configuring Asynchronous Connection Execution") | If False delivery would be always asynchronous. default true
[bindings-directory](persistence.md "Configuring the bindings journal") | The folder in use for the bindings folder
[bridges](core-bridges.md "Core Bridges") | [a list of bridge](#bridge-type)
[broadcast-groups](clusters.md "Clusters") | [a list of broadcast-group](#broadcast-group-type)
[configuration-file-refresh-period](config-reload.md) | The frequency in milliseconds the configuration file is checked for changes (default 5000)
[check-for-live-server](ha.md) | Used for a live server to verify if there are other nodes with the same ID on the topology
[cluster-connections](clusters.md "Clusters") | [a list of cluster-connection](#cluster-connection-type)
[cluster-password](clusters.md "Clusters") | Cluster password. It applies to all cluster configurations.
[cluster-user](clusters.md "Clusters") | Cluster username. It applies to all cluster configurations.
[connection-ttl-override](connection-ttl.md) | if set, this will override how long (in ms) to keep a connection alive without receiving a ping. -1 disables this setting. Default -1
[connection-ttl-check-period](connection-ttl.md) | how often (in ms) to check connections for ttl violation. Default 2000
[connectors.connector](configuring-transports.md "Understanding Connectors") | The URL for the connector. This is a list
[create-bindings-dir](persistence.md "Configuring the bindings journal") | true means that the server will create the bindings directory on start up. Default=true
[create-journal-dir](persistence.md) | true means that the journal directory will be created. Default=true
[discovery-groups](clusters.md "Clusters") | [a list of discovery-group](#discovery-group-type)
[disk-scan-period](paging.md#max-disk-usage) | The interval where the disk is scanned for percentual usage. Default=5000 ms.
[diverts](diverts.md "Diverting and Splitting Message Flows") | [a list of diverts to use](#divert-type)
[global-max-size](paging.md#global-max-size) | The amount in bytes before all addresses are considered full. Default is half of the memory used by the JVM (-Xmx argument).
[graceful-shutdown-enabled](graceful-shutdown.md "Graceful Server Shutdown") | true means that graceful shutdown is enabled. Default=false
[graceful-shutdown-timeout](graceful-shutdown.md "Graceful Server Shutdown") | Timeout on waiting for clients to disconnect before server shutdown. Default=-1
[grouping-handler](message-grouping.md "Message Grouping") | Message Group configuration
[id-cache-size](duplicate-detection.md "Configuring the Duplicate ID Cache") | The duplicate detection circular cache size. Default=20000
[jmx-domain](management.md "Configuring JMX") | the JMX domain used to registered MBeans in the MBeanServer. Default=org.apache.activemq
[jmx-management-enabled](management.md "Configuring JMX") | true means that the management API is available via JMX. Default=true
[journal-buffer-size](persistence.md) | The size of the internal buffer on the journal in KB. Default=490 KiB
[journal-buffer-timeout](persistence.md) | The Flush timeout for the journal buffer
[journal-compact-min-files](persistence.md) | The minimal number of data files before we can start compacting. Setting this to 0 means compacting is disabled. Default=10
[journal-compact-percentage](persistence.md) | The percentage of live data on which we consider compacting the journal. Default=30
[journal-directory](persistence.md) | the directory to store the journal files in. Default=data/journal
[journal-file-size](persistence.md) | the size (in bytes) of each journal file. Default=10485760 (10 MB)
[journal-max-io](persistence.md#configuring-the-message-journal) | the maximum number of write requests that can be in the AIO queue at any one time. Default is 4096 for AIO and 1 for NIO, ignored for MAPPED.
[journal-min-files](persistence.md#configuring-the-message-journal) | how many journal files to pre-create. Default=2
[journal-pool-files](persistence.md#configuring-the-message-journal) | The upper threshold of the journal file pool,-1 (default) means no Limit. The system will create as many files as needed however when reclaiming files it will shrink back to the `journal-pool-files`
[journal-sync-non-transactional](persistence.md) | if true wait for non transaction data to be synced to the journal before returning response to client. Default=true
[journal-sync-transactional](persistence.md) | if true wait for transaction data to be synchronized to the journal before returning response to client. Default=true
[journal-type](persistence.md) | the type of journal to use. Default=ASYNCIO
[journal-datasync](persistence.md) | It will use msync/fsync on journal operations. Default=true.
[large-messages-directory](large-messages.md "Configuring the server") | the directory to store large messages. Default=data/largemessages
[management-address](management.md "Configuring Core Management") | the name of the management address to send management messages to. Default=activemq.management
[management-notification-address](management.md "Configuring The Core Management Notification Address") | the name of the address that consumers bind to receive management notifications. Default=activemq.notifications
[mask-password](masking-passwords.md "Masking Passwords") | This option controls whether passwords in server configuration need be masked. If set to "true" the passwords are masked. Default=false
[max-saved-replicated-journals-size](ha.md#data-replication) | This specifies how many times a replicated backup server can restart after moving its files on start. Once there are this number of backup journal files the server will stop permanently after if fails back. -1 Means no Limit, 0 don't keep a copy at all, Default=2
[max-disk-usage](paging.md#max-disk-usage) | The max percentage of data we should use from disks. The System will block while the disk is full. Disable by setting -1. Default=100
[memory-measure-interval](perf-tuning.md) | frequency to sample JVM memory in ms (or -1 to disable memory sampling). Default=-1
[memory-warning-threshold](perf-tuning.md) | Percentage of available memory which will trigger a warning log. Default=25
[message-counter-enabled](management.md "Configuring Message Counters") | true means that message counters are enabled. Default=false
[message-counter-max-day-history](management.md "Configuring Message Counters") | how many days to keep message counter history. Default=10 (days)
[message-counter-sample-period](management.md "Configuring Message Counters") | the sample period (in ms) to use for message counters. Default=10000
[message-expiry-scan-period](message-expiry.md "Configuring The Expiry Reaper Thread") | how often (in ms) to scan for expired messages. Default=30000
[message-expiry-thread-priority](message-expiry.md "Configuring The Expiry Reaper Thread") | the priority of the thread expiring messages. Default=3
[password-codec](masking-passwords.md "Masking Passwords") | the name of the class (and optional configuration properties) used to decode masked passwords. Only valid when `mask-password` is `true`. Default=empty
[page-max-concurrent-io](paging.md "Paging Mode") | The max number of concurrent reads allowed on paging. Default=5
[paging-directory](paging.md "Configuration") | the directory to store paged messages in. Default=data/paging
[persist-delivery-count-before-delivery](undelivered-messages.md "Delivery Count Persistence") | True means that the delivery count is persisted before delivery. False means that this only happens after a message has been cancelled. Default=false
[persistence-enabled](persistence.md "Configuring ActiveMQ Artemis for Zero Persistence") | true means that the server will use the file based journal for persistence. Default=true
[persist-id-cache](duplicate-detection.md "Configuring the Duplicate ID Cache") | true means that ID's are persisted to the journal. Default=true
[queues](address-model.md "Predefined Queues") | [a list of queue to be created](#queue-type)
[remoting-incoming-interceptors](intercepting-operations.md "Intercepting Operations") | A list of interceptor
[resolveProtocols]() | Use [ServiceLoader](https://docs.oracle.com/javase/tutorial/ext/basics/spi.html) to load protocol modules. Default=true
[scheduled-thread-pool-max-size](thread-pooling.md#server-scheduled-thread-pool "Server Scheduled Thread Pool")| Maximum number of threads to use for the scheduled thread pool. Default=5
[security-enabled](security.md "Security") | true means that security is enabled. Default=true
[security-invalidation-interval](security.md "Security") | how long (in ms) to wait before invalidating the security cache. Default=10000
system-property-prefix | Prefix for replacing configuration settings using Bean Utils.
[populate-validated-user](security.md "Security") | whether or not to add the name of the validated user to the messages that user sends. Default=false
[security-settings](security.md "Role based security for addresses") | [a list of security-setting](#security-setting-type)
[thread-pool-max-size](thread-pooling.md "Server Scheduled Thread Pool") | Maximum number of threads to use for the thread pool. -1 means 'no limits'.. Default=30
[transaction-timeout](transaction-config.md "Resource Manager Configuration") | how long (in ms) before a transaction can be removed from the resource manager after create time. Default=300000
[transaction-timeout-scan-period](transaction-config.md "Resource Manager Configuration") | how often (in ms) to scan for timeout transactions. Default=1000
[wild-card-routing-enabled](wildcard-routing.md "Routing Messages With Wild Cards") | true means that the server supports wild card routing. Default=true
[network-check-NIC](network-isolation.md) | The NIC (Network Interface Controller) to be used on InetAddress.isReachable
[network-check-URL](network-isolation.md) | The list of http URIs to be used to validate the network
[network-check-list](network-isolation.md) | The list of pings to be used on ping or InetAddress.isReachable
[network-check-ping-command](network-isolation.md) | The command used to ping IPV4 addresses
[network-check-ping6-command](network-isolation.md) | The command used to ping IPV6 addresses
[critical-analyzer](critical-analysis.md) | Enable or disable the critical analysis (default true)
[critical-analyzer-timeout](critical-analysis.md) | Timeout used to do the critical analysis (default 120000 milliseconds)
[critical-analyzer-check-period](critical-analysis.md) | Time used to check the response times (default half of critical-analyzer-timeout)
[critical-analyzer-policy](critical-analysis.md) | Should the server log, be halted or shutdown upon failures (default `LOG`)
Name | Description | Default
---|---|---
[acceptors](configuring-transports.md#acceptors) | a list of remoting acceptors | n/a
[acceptors.acceptor](configuring-transports.md#acceptors) | Each acceptor is composed for just an URL | n/a
[addresses](address-model.md#basic-address-configuration) | [a list of addresses](#address-type) | n/a
[address-settings](address-model.md#configuring-addresses-and-queues-via-address-settings) | [a list of address-setting](#address-setting-type) | n/a
[allow-failback](ha.md#failing-back-to-live-server)| Should stop backup on live restart. | `true`
[amqp-use-core-subscription-naming](amqp.md) | If true uses CORE queue naming convention for AMQP. | `false`
[async-connection-execution-enabled](connection-ttl.md) | If false, delivery will always be asynchronous. | `true`
[bindings-directory](persistence.md) | The folder in use for the bindings folder | `data/bindings`
[bridges](core-bridges.md) | [a list of core bridges](#bridge-type) | n/a
[ha-policy](ha.md) | the HA policy of this server | none
[broadcast-groups](clusters.md#broadcast-groups) | [a list of broadcast-group](#broadcast-group-type) | n/a
[broker-plugins](broker-plugins.md) | [a list of broker-plugins](#broker-plugin-type) | n/a
[configuration-file-refresh-period](config-reload.md) | The frequency in milliseconds the configuration file is checked for changes | 5000
[check-for-live-server](ha.md#data-replication)| Used for a live server to verify if there are other nodes with the same ID on the topology | n/a
[cluster-connections](clusters.md#configuring-cluster-connections) | [a list of cluster-connection](#cluster-connection-type) | n/a
[cluster-password](clusters.md) |Cluster password. It applies to all cluster configurations. | n/a
[cluster-user](clusters.md) |Cluster username. It applies to all cluster configurations. | n/a
[connection-ttl-override](connection-ttl.md) |if set, this will override how long (in ms) to keep a connection alive without receiving a ping. -1 disables this setting. | -1
[connection-ttl-check-interval](connection-ttl.md) |how often (in ms) to check connections for ttl violation. | 2000
[connectors.connector](configuring-transports.md) | The URL for the connector. This is a list | n/a
[create-bindings-dir](persistence.md) | true means that the server will create the bindings directory on start up. | `true`
[create-journal-dir](persistence.md)| true means that the journal directory will be created. | `true`
[discovery-groups](clusters.md#discovery-groups)| [a list of discovery-group](#discovery-group-type) | n/a
[disk-scan-period](paging.md#max-disk-usage) | The interval where the disk is scanned for percentage usage. | 5000
[diverts](diverts.md) | [a list of diverts to use](#divert-type) | n/a
[global-max-size](paging.md#global-max-size) | The amount in bytes before all addresses are considered full. | Half of the JVM's `-Xmx`
[graceful-shutdown-enabled](graceful-shutdown.md)| true means that graceful shutdown is enabled. | `false`
[graceful-shutdown-timeout](graceful-shutdown.md)| Timeout on waiting for clients to disconnect before server shutdown. | -1
[grouping-handler](message-grouping.md) | [a message grouping handler](#grouping-handler-type) | n/a
[id-cache-size](duplicate-detection.md#configuring-the-duplicate-id-cache) | The duplicate detection circular cache size. | 20000
[jmx-domain](management.md#configuring-jmx) | the JMX domain used to register MBeans in the MBeanServer. | `org.apache.activemq`
[jmx-use-broker-name](management.md#configuring-jmx) | whether or not to use the broker name in the JMX properties. | `true`
[jmx-management-enabled](management.md#configuring-jmx) | true means that the management API is available via JMX. | `true`
[journal-buffer-size](persistence.md#configuring-the-message-journal) | The size of the internal buffer on the journal in KB. | 490KB
[journal-buffer-timeout](persistence.md#configuring-the-message-journal) | The Flush timeout for the journal buffer | 500000 for ASYNCIO; 3333333 for NIO
[journal-compact-min-files](persistence.md#configuring-the-message-journal) | The minimal number of data files before we can start compacting. Setting this to 0 means compacting is disabled. | 10
[journal-compact-percentage](persistence.md#configuring-the-message-journal) | The percentage of live data on which we consider compacting the journal. | 30
[journal-directory](persistence.md#configuring-the-message-journal) | the directory to store the journal files in. | `data/journal`
[journal-file-size](persistence.md#configuring-the-message-journal) | the size (in bytes) of each journal file. | 10MB
[journal-lock-acquisition-timeout](persistence.md#configuring-the-message-journal) | how long (in ms) to wait to acquire a file lock on the journal. | -1
[journal-max-io](persistence.md#configuring-the-message-journal) | the maximum number of write requests that can be in the ASYNCIO queue at any one time. | 4096 for ASYNCIO; 1 for NIO; ignored for MAPPED
[journal-file-open-timeout](persistence.md#configuring-the-message-journal) | the length of time in seconds to wait when opening a new journal file before timing out and failing. | 5
[journal-min-files](persistence.md#configuring-the-message-journal) | how many journal files to pre-create. | 2
[journal-pool-files](persistence.md#configuring-the-message-journal) | The upper threshold of the journal file pool, -1 means no Limit. The system will create as many files as needed however when reclaiming files it will shrink back to the `journal-pool-files` | -1
[journal-sync-non-transactional](persistence.md#configuring-the-message-journal) | if true wait for non transaction data to be synced to the journal before returning response to client. | `true`
[journal-sync-transactional](persistence.md#configuring-the-message-journal)| if true wait for transaction data to be synchronized to the journal before returning response to client. | `true`
[journal-type](persistence.md#configuring-the-message-journal) | the type of journal to use. | `ASYNCIO`
[journal-datasync](persistence.md#configuring-the-message-journal) | It will use msync/fsync on journal operations. | `true`
[large-messages-directory](large-messages.md) | the directory to store large messages. | `data/largemessages`
log-delegate-factory-class-name | **deprecated** the name of the factory class to use for log delegation. | n/a
[management-address](management.md#configuring-management)| the name of the management address to send management messages to. | `activemq.management`
[management-notification-address](management.md#configuring-the-management-notification-address) | the name of the address that consumers bind to receive management notifications. | `activemq.notifications`
[mask-password](masking-passwords.md) | This option controls whether passwords in server configuration need be masked. If set to "true" the passwords are masked. | `false`
[max-saved-replicated-journals-size](ha.md#data-replication) | This specifies how many times a replicated backup server can restart after moving its files on start. Once there are this number of backup journal files the server will stop permanently after it fails back. -1 Means no Limit; 0 don't keep a copy at all. | 2
[max-disk-usage](paging.md#max-disk-usage) | The max percentage of data we should use from disks. The System will block while the disk is full. Disable by setting -1. | 100
[memory-measure-interval](perf-tuning.md) | frequency to sample JVM memory in ms (or -1 to disable memory sampling). | -1
[memory-warning-threshold](perf-tuning.md)| Percentage of available memory which will trigger a warning log. | 25
[message-counter-enabled](management.md#message-counters) | true means that message counters are enabled. | `false`
[message-counter-max-day-history](management.md#message-counters)| how many days to keep message counter history. | 10
[message-counter-sample-period](management.md#message-counters) | the sample period (in ms) to use for message counters. | 10000
[message-expiry-scan-period](message-expiry.md#configuring-the-expiry-reaper-thread) | how often (in ms) to scan for expired messages. | 30000
[message-expiry-thread-priority](message-expiry.md#configuring-the-expiry-reaper-thread)| the priority of the thread expiring messages. | 3
name | node name; used in topology notifications if set. | n/a
[password-codec](masking-passwords.md) | the name of the class (and optional configuration properties) used to decode masked passwords. Only valid when `mask-password` is `true`. | n/a
[page-max-concurrent-io](paging.md) | The max number of concurrent reads allowed on paging. | 5
[paging-directory](paging.md#configuration)| the directory to store paged messages in. | `data/paging`
[persist-delivery-count-before-delivery](undelivered-messages.md#delivery-count-persistence) | True means that the delivery count is persisted before delivery. False means that this only happens after a message has been cancelled. | `false`
[persistence-enabled](persistence.md#zero-persistence)| true means that the server will use the file based journal for persistence. | `true`
[persist-id-cache](duplicate-detection.md#configuring-the-duplicate-id-cache) | true means that ID's are persisted to the journal. | `true`
queues | **deprecated** [use addresses](#address-type) | n/a
[remoting-incoming-interceptors](intercepting-operations.md)| a list of &lt;class-name/&gt; elements with the names of classes to use for intercepting incoming remoting packets | n/a
[remoting-outgoing-interceptors](intercepting-operations.md)| a list of &lt;class-name/&gt; elements with the names of classes to use for intercepting outgoing remoting packets | n/a
[resolveProtocols]() | Use [ServiceLoader](https://docs.oracle.com/javase/tutorial/ext/basics/spi.html) to load protocol modules. | `true`
[resource-limit-settings](resource-limits.md) | [a list of resource-limits](#resource-limit-type) | n/a
[scheduled-thread-pool-max-size](thread-pooling.md#server-scheduled-thread-pool)| Maximum number of threads to use for the scheduled thread pool. | 5
[security-enabled](security.md) | true means that security is enabled. | `true`
[security-invalidation-interval](security.md) | how long (in ms) to wait before invalidating the security cache. | 10000
system-property-prefix | Prefix for replacing configuration settings using Bean Utils. | n/a
internal-naming-prefix | the prefix used when naming the internal queues and addresses required for implementing certain behaviours. | `$.activemq.internal`
[populate-validated-user](security.md#tracking-the-validated-user)| whether or not to add the name of the validated user to the messages that user sends. | `false`
[security-settings](security.md#role-based-security-for-addresses) | [a list of security-setting](#security-setting-type). | n/a
[thread-pool-max-size](thread-pooling.md#thread-management) | Maximum number of threads to use for the thread pool. -1 means 'no limits'. | 30
[transaction-timeout](transaction-config.md) | how long (in ms) before a transaction can be removed from the resource manager after create time. | 300000
[transaction-timeout-scan-period](transaction-config.md) | how often (in ms) to scan for timeout transactions. | 1000
[wild-card-routing-enabled](wildcard-routing.md) | true means that the server supports wild card routing. | `true`
[network-check-NIC](network-isolation.md) | the NIC (Network Interface Controller) to be used on InetAddress.isReachable. | n/a
[network-check-URL-list](network-isolation.md) | the list of http URIs to be used to validate the network. | n/a
[network-check-list](network-isolation.md) | the list of pings to be used on ping or InetAddress.isReachable. | n/a
[network-check-period](network-isolation.md) | a frequency in milliseconds to how often we should check if the network is still up. | 10000
[network-check-timeout](network-isolation.md) | a timeout used in milliseconds to be used on the ping. | 1000
[network-check-ping-command](network-isolation.md) | the command used to ping IPV4 addresses. | n/a
[network-check-ping6-command](network-isolation.md) | the command used to ping IPV6 addresses. | n/a
[critical-analyzer](critical-analysis.md) | enable or disable the critical analysis. | `true`
[critical-analyzer-timeout](critical-analysis.md) | timeout used to do the critical analysis. | 120000 ms
[critical-analyzer-check-period](critical-analysis.md) | time used to check the response times. | 0.5 \* `critical-analyzer-timeout`
[critical-analyzer-policy](critical-analysis.md) | should the server log, be halted or shutdown upon failures. | `LOG`
resolve-protocols | if true then the broker will make use of any protocol managers that are available on the classpath, otherwise only the core protocol will be available, unless in embedded mode where users can inject their own protocol managers. | `true`
[resource-limit-settings](resource-limits.md) | [a list of resource-limit](#resource-limit-type). | n/a
server-dump-interval | interval to log server specific information (e.g. memory usage etc). | -1
store | the store type used by the server. | n/a
[wildcard-addresses](wildcard-syntax.md) | parameters to configure wildcard address matching format. | n/a
## address-setting type
Name | Description | Default
---|---|---
[match](address-model.md) | The filter to apply to the setting | n/a
[dead-letter-address](undelivered-messages.md) | Dead letter address | n/a
[expiry-address](message-expiry.md) | Expired messages address | n/a
[expiry-delay](address-model.md) | Expiration time override; -1 don't override | -1
[redelivery-delay](undelivered-messages.md) | Time to wait before redelivering a message | 0
[redelivery-delay-multiplier](address-model.md) | Multiplier to apply to the `redelivery-delay` | 1.0
[max-redelivery-delay](address-model.md) | Max value for the `redelivery-delay` | 10 \* `redelivery-delay`
[max-delivery-attempts](undelivered-messages.md)| Number of retries before dead letter address| 10
[max-size-bytes](paging.md)| Max size a queue can be before invoking `address-full-policy` | -1
[max-size-bytes-reject-threshold]() | Used with `BLOCK`, the max size an address can reach before messages are rejected; works in combination with `max-size-bytes` **for AMQP clients only**. | -1
[page-size-bytes](paging.md) | Size of each file on page | 10485760
[page-max-cache-size](paging.md) | Maximum number of files cached from paging | 5
[address-full-policy](address-model.md)| What to do when a queue reaches `max-size-bytes` | `PAGE`
[message-counter-history-day-limit](address-model.md) | Days to keep message counter data | 0
[last-value-queue](last-value-queues.md) | **deprecated** Queue is a last value queue; see `default-last-value-queue` instead | `false`
[default-last-value-queue](last-value-queues.md)| `last-value` value if none is set on the queue | `false`
[default-exclusive-queue](exclusive-queues.md) | `exclusive` value if none is set on the queue | `false`
[redistribution-delay](clusters.md) | Timeout before redistributing values after no consumers | -1
[send-to-dla-on-no-route](address-model.md) | Forward messages to DLA when no queues subscribing | `false`
[slow-consumer-threshold](slow-consumers.md) | Min rate of msgs/sec consumed before a consumer is considered "slow" | -1
[slow-consumer-policy](slow-consumers.md) | What to do when "slow" consumer is detected | `NOTIFY`
[slow-consumer-check-period](slow-consumers.md) | How often to check for "slow" consumers | 5
[auto-create-jms-queues](address-model.md#configuring-addresses-and-queues-via-address-settings)| **deprecated** Create JMS queues automatically; see `auto-create-queues` & `auto-create-addresses` | `true`
[auto-delete-jms-queues](address-model.md#configuring-addresses-and-queues-via-address-settings)| **deprecated** Delete JMS queues automatically; see `auto-create-queues` & `auto-create-addresses` | `true`
[auto-create-jms-topics](address-model.md#configuring-addresses-and-queues-via-address-settings)| **deprecated** Create JMS topics automatically; see `auto-create-queues` & `auto-create-addresses` | `true`
[auto-delete-jms-topics](address-model.md#configuring-addresses-and-queues-via-address-settings)| **deprecated** Delete JMS topics automatically; see `auto-create-queues` & `auto-create-addresses` | `true`
[auto-create-queues](address-model.md#configuring-addresses-and-queues-via-address-settings) | Create queues automatically | `true`
[auto-delete-queues](address-model.md#configuring-addresses-and-queues-via-address-settings) | Delete queues automatically | `true`
[config-delete-queues](config-reload.md)| How to deal with queues deleted from XML at runtime| `OFF`
[auto-create-addresses](address-model.md#configuring-addresses-and-queues-via-address-settings) | Create addresses automatically | `true`
[auto-delete-addresses](address-model.md#configuring-addresses-and-queues-via-address-settings) | Delete addresses automatically | `true`
[config-delete-addresses](config-reload.md) | How to deal with addresses deleted from XML at runtime | `OFF`
[management-browse-page-size]() | Number of messages a management resource can browse| 200
[default-purge-on-no-consumers](address-model.md#non-durable-subscription-queue) | `purge-on-no-consumers` value if none is set on the queue | `false`
[default-max-consumers](address-model.md#shared-durable-subscription-queue-using-max-consumers) | `max-consumers` value if none is set on the queue | -1
[default-queue-routing-type](address-model.md#routing-type) | Routing type for auto-created queues if the type can't be otherwise determined | `MULTICAST`
[default-address-routing-type](address-model.md#routing-type) | Routing type for auto-created addresses if the type can't be otherwise determined | `MULTICAST`
# address-setting type
## bridge type
Name | Description
:--- | :---
[match ](address-model.md "Configuring Queues Via Address Settings") | The filter to apply to the setting
[dead-letter-address](undelivered-messages.md "Configuring Dead Letter Addresses") | dead letter address
[expiry-address](message-expiry.md "Configuring Expiry Addresses") | expired messages address
[expiry-delay](address-model.md "Configuring Queues Via Address Settings") | expiration time override, -1 don't override with default=-1
[redelivery-delay](undelivered-messages.md "Configuring Delayed Redelivery") | time to redeliver a message (in ms) with default=0
[redelivery-delay-multiplier](address-model.md "Configuring Queues Via Address Settings") | multiplier to apply to the "redelivery-delay"
[max-redelivery-delay](address-model.md "Configuring Queues Via Address Settings") | Max value for the redelivery-delay
[max-delivery-attempts](undelivered-messages.md "Configuring Dead Letter Addresses") | Number of retries before dead letter address, default=10
[max-size-bytes](paging.md "Paging") | Limit before paging. -1 = infinite
[page-size-bytes](paging.md "Paging") | Size of each file on page, default=10485760
[page-max-cache-size](paging.md "Paging") | Maximum number of files cached from paging default=5
[address-full-policy](address-model.md "Configuring Queues Via Address Settings") | Policy to choose when the queue is full
[message-counter-history-day-limit](address-model.md "Configuring Queues Via Address Settings") | Days to keep in history
[last-value-queue](last-value-queues.md "Last-Value Queues") | Queue is a last value queue, default=false
[redistribution-delay](clusters.md "Clusters") | Timeout before redistributing values after no consumers. default=-1
[send-to-dla-on-no-route](address-model.md "Configuring Queues Via Address Settings") | Forward messages to DLA when no queues subscribing. default=false
Name | Description | Default
---|---|---
[name ](core-bridges.md)| unique name | n/a
[queue-name](core-bridges.md) | name of queue that this bridge consumes from | n/a
[forwarding-address](core-bridges.md) | address to forward to. If omitted original address is used | n/a
[ha](core-bridges.md)| whether this bridge supports fail-over | `false`
[filter](core-bridges.md) | optional core filter expression | n/a
[transformer-class-name](core-bridges.md) | optional name of transformer class | n/a
[min-large-message-size](core-bridges.md) | Limit before message is considered large. | 100KB
[check-period](connection-ttl.md)| How often to check for [TTL](https://en.wikipedia.org/wiki/Time_to_live) violation. -1 means disabled. | 30000
[connection-ttl](connection-ttl.md) | [TTL](https://en.wikipedia.org/wiki/Time_to_live "Time to Live") for the Bridge. This should be greater than the ping period. | 60000
[retry-interval](core-bridges.md)| period (in ms) between successive retries. | 2000
[retry-interval-multiplier](core-bridges.md) | multiplier to apply to successive retry intervals. | 1
[max-retry-interval](core-bridges.md) | Limit to the retry-interval growth. | 2000
[reconnect-attempts](core-bridges.md) | maximum number of retry attempts.| -1 (no limit)
[use-duplicate-detection](core-bridges.md)| forward duplicate detection headers? | `true`
[confirmation-window-size](core-bridges.md) | number of bytes before confirmations are sent. | 1MB
[producer-window-size](core-bridges.md)| Producer flow control size on the bridge. | -1 (disabled)
[user](core-bridges.md) | Username for the bridge, the default is the cluster username. | n/a
[password](core-bridges.md)| Password for the bridge, default is the cluster password. | n/a
[reconnect-attempts-same-node](core-bridges.md) | Number of retries before trying another node. | 10
# bridge type
Name | Description
:--- | :---
[name ](core-bridges.md "Core Bridges") | unique name
[queue-name](core-bridges.md "Core Bridges") | name of queue that this bridge consumes from
[forwarding-address](core-bridges.md "Core Bridges") | address to forward to. If omitted original address is used
[ha](core-bridges.md "Core Bridges") | whether this bridge supports fail-over
[filter](core-bridges.md "Core Bridges") | optional core filter expression
[transformer-class-name](core-bridges.md "Core Bridges") | optional name of transformer class
[min-large-message-size](core-bridges.md "Core Bridges") | Limit before message is considered large. default 100KB
[check-period](connection-ttl.md "Detecting Dead Connections") | [TTL](https://en.wikipedia.org/wiki/Time_to_live "Time to Live") check period for the bridge. -1 means disabled. default 30000 (ms)
[connection-ttl](connection-ttl.md "Detecting Dead Connections") | [TTL](https://en.wikipedia.org/wiki/Time_to_live "Time to Live") for the Bridge. This should be greater than the ping period. default 60000 (ms)
[retry-interval](core-bridges.md "Core Bridges") | period (in ms) between successive retries. default 2000
[retry-interval-multiplier](core-bridges.md "Core Bridges") | multiplier to apply to successive retry intervals. default 1
[max-retry-interval](core-bridges.md "Core Bridges") | Limit to the retry-interval growth. default 2000
[reconnect-attempts](core-bridges.md "Core Bridges") | maximum number of retry attempts, -1 means 'no limits'. default -1
[use-duplicate-detection](core-bridges.md "Core Bridges") | forward duplicate detection headers? default true
[confirmation-window-size](core-bridges.md "Core Bridges") | number of bytes before confirmations are sent. default 1MB
[producer-window-size](core-bridges.md "Core Bridges") | Producer flow control size on the bridge. Default -1 (disabled)
[user](core-bridges.md "Core Bridges") | Username for the bridge, the default is the cluster username
[password](core-bridges.md "Core Bridges") | Password for the bridge, default is the cluster password
[reconnect-attempts-same-node](core-bridges.md "Core Bridges") | Number of retries before trying another node. default 10
# broadcast-group type
## broadcast-group type
Name | Type
:--- | :---
[name ](clusters.md "Clusters") | unique name
[local-bind-address](clusters.md "Clusters") | local bind address that the datagram socket is bound to
[local-bind-port](clusters.md "Clusters") | local port to which the datagram socket is bound to
[group-address](clusters.md "Clusters") | multicast address to which the data will be broadcast
[group-port](clusters.md "Clusters") | UDP port number used for broadcasting
[broadcast-period](clusters.md "Clusters") | period in milliseconds between consecutive broadcasts. default 2000
[jgroups-file](clusters.md) | Name of JGroups configuration file
[jgroups-channel](clusters.md) | Name of JGroups Channel
[connector-ref](clusters.md "Clusters") |
---|---
[name ](clusters.md) | unique name
[local-bind-address](clusters.md) | Local bind address that the datagram socket is bound to.
[local-bind-port](clusters.md) | Local port to which the datagram socket is bound to.
[group-address](clusters.md)| Multicast address to which the data will be broadcast.
[group-port](clusters.md)| UDP port number used for broadcasting.
[broadcast-period](clusters.md)| Period in milliseconds between consecutive broadcasts. Default=2000.
[jgroups-file](clusters.md) | Name of JGroups configuration file.
[jgroups-channel](clusters.md) | Name of JGroups Channel.
[connector-ref](clusters.md)| The `connector` to broadcast.
# cluster-connection type
## cluster-connection type
Name | Description | Default
---|---|---
[name](clusters.md) | unique name | n/a
[address](clusters.md) | name of the address this cluster connection applies to | n/a
[connector-ref](clusters.md) | Name of the connector reference to use. | n/a
[check-period](connection-ttl.md) | The period (in milliseconds) used to check if the cluster connection has failed to receive pings from another server | 30000
[connection-ttl](connection-ttl.md)| Timeout for TTL. | 60000
[min-large-message-size](large-messages.md) | Messages larger than this are considered large-messages. | 100KB
[call-timeout](clusters.md) | Time(ms) before giving up on blocked calls. | 30000
[retry-interval](clusters.md)| period (in ms) between successive retries. | 500
[retry-interval-multiplier](clusters.md) | multiplier to apply to the retry-interval. | 1
[max-retry-interval](clusters.md) | Maximum value for retry-interval. | 2000
[reconnect-attempts](clusters.md) | How many attempts should be made to reconnect after failure. | -1
[use-duplicate-detection](clusters.md)| should duplicate detection headers be inserted in forwarded messages? | `true`
[message-load-balancing](clusters.md) | how should messages be load balanced? | `OFF`
[max-hops](clusters.md)| maximum number of hops cluster topology is propagated. | 1
[confirmation-window-size](client-reconnection.md#client-reconnection-and-session-reattachment)| The size (in bytes) of the window used for confirming data from the server connected to. | 1048576
[producer-window-size](clusters.md)| Flow Control for the Cluster connection bridge. | -1 (disabled)
[call-failover-timeout](clusters.md#configuring-cluster-connections)| How long to wait for a reply if in the middle of a fail-over. -1 means wait forever. | -1
[notification-interval](clusters.md) | how often the cluster connection will notify the cluster of its existence right after joining the cluster. | 1000
[notification-attempts](clusters.md) | how many times this cluster connection will notify the cluster of its existence right after joining the cluster | 2
## discovery-group type
Name | Description
:--- | :---
[name](clusters.md "Clusters") | unique name
[address](clusters.md "Clusters") | name of the address this cluster connection applies to
[connector-ref](clusters.md "Clusters") | Name of the connector reference to use.
[check-period](connection-ttl.md "Detecting Dead Connections") | The period (in milliseconds) used to check if the cluster connection has failed to receive pings from another server with default = 30000
[connection-ttl](connection-ttl.md "Detecting Dead Connections") | Timeout for TTL. Default 60000
[min-large-message-size](large-messages.md "Large Messages") | Messages larger than this are considered large-messages, default=100KB
[call-timeout](clusters.md "Clusters") | Time(ms) before giving up on blocked calls. Default=30000
[retry-interval](clusters.md "Clusters") | period (in ms) between successive retries. Default=500
[retry-interval-multiplier](clusters.md "Clusters") | multiplier to apply to the retry-interval. Default=1
[max-retry-interval](clusters.md "Clusters") | Maximum value for retry-interval. Default=2000
[reconnect-attempts](clusters.md "Clusters") | How many attempts should be made to reconnect after failure. Default=-1
[use-duplicate-detection](clusters.md "Clusters") | should duplicate detection headers be inserted in forwarded messages? Default=true
[message-load-balancing](clusters.md "Clusters") | how should messages be load balanced? Default=OFF
[max-hops](clusters.md "Clusters") | maximum number of hops cluster topology is propagated. Default=1
[confirmation-window-size](client-reconnection.md "Client Reconnection and Session Reattachment")| The size (in bytes) of the window used for confirming data from the server connected to. Default 1048576
[producer-window-size](clusters.md "Clusters") | Flow Control for the Cluster connection bridge. Default -1 (disabled)
[call-failover-timeout](clusters.md "Configuring Cluster Connections") | How long to wait for a reply if in the middle of a fail-over. -1 means wait forever. Default -1
[notification-interval](clusters.md "Clusters") | how often the cluster connection will notify the cluster of its existence right after joining the cluster. Default 1000
[notification-attempts](clusters.md "Clusters") | how many times this cluster connection will notify the cluster of its existence right after joining the cluster Default 2
---|---
[name](clusters.md)| unique name
[group-address](clusters.md)| Multicast IP address of the group to listen on
[group-port](clusters.md)| UDP port number of the multi cast group
[jgroups-file](clusters.md) | Name of a JGroups configuration file. If specified, the server uses JGroups for discovery.
[jgroups-channel](clusters.md) | Name of a JGroups Channel. If specified, the server uses the named channel for discovery.
[refresh-timeout]()| Period the discovery group waits after receiving the last broadcast from a particular server before removing that server's connector pair entry from its list. Default=10000
[local-bind-address](clusters.md) | local bind address that the datagram socket is bound to
[local-bind-port](clusters.md) | local port to which the datagram socket is bound to. Default=-1
initial-wait-timeout | time to wait for an initial broadcast to give us at least one node in the cluster. Default=10000
# discovery-group type
## divert type
Name | Description
:--- | :---
[name](clusters.md "Clusters") | unique name
[group-address](clusters.md "Clusters") | Multicast IP address of the group to listen on
[group-port](clusters.md "Clusters") | UDP port number of the multi cast group
[jgroups-file](clusters.md) | Name of a JGroups configuration file. If specified, the server uses JGroups for discovery.
[jgroups-channel](clusters.md) | Name of a JGroups Channel. If specified, the server uses the named channel for discovery.
[refresh-timeout]() | Period the discovery group waits after receiving the last broadcast from a particular server before removing that server's connector pair entry from its list. Default=10000
[local-bind-address](clusters.md "Clusters") | local bind address that the datagram socket is bound to
[local-bind-port](clusters.md "Clusters") | local port to which the datagram socket is bound to. Default=-1
[initial-wait-timeout]() | time to wait for an initial broadcast to give us at least one node in the cluster. Default=10000
---|---
[name](diverts.md) | unique name
[transformer-class-name](diverts.md) | an optional class name of a transformer
[exclusive](diverts.md) | whether this is an exclusive divert. Default=false
[routing-name](diverts.md) | the routing name for the divert
[address](diverts.md) | the address this divert will divert from
[forwarding-address](diverts.md) | the forwarding address for the divert
[filter](diverts.md) | optional core filter expression
#divert type
## address type
Name | Description
:--- | :---
[name](diverts.md "Diverting and Splitting Message Flows") | unique name
[transformer-class-name](diverts.md "Diverting and Splitting Message Flows") | an optional class name of a transformer
[exclusive](diverts.md "Diverting and Splitting Message Flows") | whether this is an exclusive divert. Default=false
[routing-name](diverts.md "Diverting and Splitting Message Flows") | the routing name for the divert
[address](diverts.md "Diverting and Splitting Message Flows") | the address this divert will divert from
[forwarding-address](diverts.md "Diverting and Splitting Message Flows") | the forwarding address for the divert
[filter](diverts.md "Diverting and Splitting Message Flows")| optional core filter expression
---|---
name | unique name | n/a
[anycast](address-model.md)| list of anycast [queues](#queue-type)
[multicast](address-model.md) | list of multicast [queues](#queue-type)
#queue type
## queue type
Name | Description | Default
---|---|---
name | unique name | n/a
filter | optional core filter expression | n/a
durable | whether the queue is durable (persistent). | `true`
user | the name of the user to associate with the creation of the queue | n/a
[max-consumers](address-model.md#shared-durable-subscription-queue-using-max-consumers) | the max number of consumers allowed on this queue | -1 (no max)
[purge-on-no-consumers](address-model.md#non-durable-subscription-queue) | whether or not to delete all messages and prevent routing when no consumers are connected | `false`
[exclusive](exclusive-queues.md) | only deliver messages to one of the connected consumers | `false`
[last-value](last-value-queues.md) | use last-value semantics | `false`
## security-setting type
Name | Description
:--- | :---
[name ](address-model.md "Predefined Queues") | unique name
[address](address-model.md "Predefined Queues") | address for the queue
[filter](address-model.md "Predefined Queues") | optional core filter expression
[durable](address-model.md "Predefined Queues") | whether the queue is durable (persistent). Default=true
---|---
[match](security.md)| [address expression](wildcard-syntax.md)
[permission](security.md) |
[permission.type](security.md) | the type of permission
[permission.roles](security.md) | a comma-separated list of roles to apply the permission to
#security-setting type
## broker-plugin type
Name | Description
:--- | :---
[match ](security.md "Role based security for addresses") | [address expression](wildcard-syntax.md)
[permission](security.md "Role based security for addresses") |
[permission.type ](security.md "Role based security for addresses") | the type of permission
[permission.roles ](security.md "Role based security for addresses") | a comma-separated list of roles to apply the permission to
---|---
[property](broker-plugins.md#registering-a-plugin)| properties to configure a plugin
[class-name](broker-plugins.md#registering-a-plugin) | the name of the broker plugin class to instantiate
## resource-limit type
Name | Description | Default
---|---|---
[match](resource-limits.md#configuring-limits-via-resource-limit-settings)| the name of the user to whom the limits should be applied | n/a
[max-connections](resource-limits.md#configuring-limits-via-resource-limit-settings)| how many connections are allowed by the matched user | -1 (no max)
[max-queues](resource-limits.md#configuring-limits-via-resource-limit-settings)| how many queues can be created by the matched user | -1 (no max)
## grouping-handler type
Name | Description | Default
---|---|---
[name](message-grouping.md#clustered-grouping) | A unique name | n/a
[type](message-grouping.md#clustered-grouping) | `LOCAL` or `REMOTE` | n/a
[address](message-grouping.md#clustered-grouping) | A reference to a `cluster-connection` `address` | n/a
[timeout](message-grouping.md#clustered-grouping) | How long to wait for a decision | 5000
[group-timeout](message-grouping.md#clustered-grouping) | How long a group binding will be used. | -1 (disabled)
[reaper-period](message-grouping.md#clustered-grouping) | How often the reaper will be run to check for timed out group bindings. Only valid for `LOCAL` handlers. | 30000

View File

@ -1,43 +1,39 @@
# Configuring the Transport
In this chapter we'll describe the concepts required for understanding
Apache ActiveMQ Artemis transports and where and how they're configured.
In this chapter we'll describe the concepts required for understanding Apache
ActiveMQ Artemis transports and where and how they're configured.
## Acceptors
One of the most important concepts in Apache ActiveMQ Artemis transports is the
*acceptor*. Let's dive straight in and take a look at an acceptor
defined in xml in the configuration file `broker.xml`.
*acceptor*. Let's dive straight in and take a look at an acceptor defined in
xml in the configuration file `broker.xml`.
```xml
<acceptors>
<acceptor name="netty">tcp://localhost:61617</acceptor>
</acceptors>
<acceptor name="netty">tcp://localhost:61617</acceptor>
```
Acceptors are always defined inside an `acceptors` element. There can be
one or more acceptors defined in the `acceptors` element. There's no
upper limit to the number of acceptors per server.
Acceptors are always defined inside an `acceptors` element. There can be one or
more acceptors defined in the `acceptors` element. There's no upper limit to
the number of acceptors per server.
Each acceptor defines a way in which connections can be made to the
Apache ActiveMQ Artemis server.
Each acceptor defines a way in which connections can be made to the Apache
ActiveMQ Artemis server.
In the above example we're defining an acceptor that uses
[Netty](https://netty.io/) to listen for connections at port
`61617`.
[Netty](https://netty.io/) to listen for connections at port `61617`.
The `acceptor` element contains a `URL` that defines the kind of Acceptor
to create along with its configuration. The `schema` part of the `URL`
defines the Acceptor type which can either be `tcp` or `vm` which is
`Netty` or an In VM Acceptor respectively. For `Netty` the host and the
port of the `URL` define what host and port the `acceptor` will bind to. For
In VM the `Authority` part of the `URL` defines a unique server id.
The `acceptor` element contains a `URL` that defines the kind of Acceptor to
create along with its configuration. The `schema` part of the `URL` defines the
Acceptor type which can either be `tcp` or `vm` which is `Netty` or an In VM
Acceptor respectively. For `Netty` the host and the port of the `URL` define
what host and port the `acceptor` will bind to. For In VM the `Authority` part
of the `URL` defines a unique server id.
The `acceptor` can also be configured with a set of key=value pairs
used to configure the specific transport, the set of
valid key=value pairs depends on the specific transport be used and are
passed straight through to the underlying transport. These are set on the
`URL` as part of the query, like so:
The `acceptor` can also be configured with a set of key=value pairs used to
configure the specific transport, the set of valid key=value pairs depends on
the specific transport be used and are passed straight through to the
underlying transport. These are set on the `URL` as part of the query, like so:
```xml
<acceptor name="netty">tcp://localhost:61617?sslEnabled=true&keyStorePath=/path</acceptor>
@ -45,43 +41,41 @@ passed straight through to the underlying transport. These are set on the
## Connectors
Whereas acceptors are used on the server to define how we accept
connections, connectors are used to define how to connect to a server.
Whereas acceptors are used on the server to define how we accept connections,
connectors are used to define how to connect to a server.
Let's look at a connector defined in our `broker.xml` file:
```xml
<connectors>
<connector name="netty">tcp://localhost:61617</connector>
</connectors>
<connector name="netty">tcp://localhost:61617</connector>
```
Connectors can be defined inside a `connectors` element. There can be
one or more connectors defined in the `connectors` element. There's no
upper limit to the number of connectors per server.
Connectors can be defined inside a `connectors` element. There can be one or
more connectors defined in the `connectors` element. There's no upper limit to
the number of connectors per server.
A `connector` is used when the server acts as a client itself, e.g.:
- When one server is bridged to another
- When a server takes part in a cluster
- When one server is bridged to another
- When a server takes part in a cluster
In these cases the server needs to know how to connect to other servers.
That's defined by `connectors`.
## Configuring the Transport Directly from the Client
How do we configure a core `ClientSessionFactory` with the information
that it needs to connect with a server?
How do we configure a core `ClientSessionFactory` with the information that it
needs to connect with a server?
Connectors are also used indirectly when configuring a core
`ClientSessionFactory` to directly talk to a server. Although in this
case there's no need to define such a connector in the server side
configuration, instead we just specify the appropriate URI.
`ClientSessionFactory` to directly talk to a server. Although in this case
there's no need to define such a connector in the server side configuration,
instead we just specify the appropriate URI.
Here's an example of creating a `ClientSessionFactory` which will
connect directly to the acceptor we defined earlier in this chapter, it
uses the standard Netty TCP transport and will try and connect on port
61617 to localhost (default):
Here's an example of creating a `ClientSessionFactory` which will connect
directly to the acceptor we defined earlier in this chapter, it uses the
standard Netty TCP transport and will try and connect on port 61617 to
localhost (default):
```java
ServerLocator locator = ActiveMQClient.createServerLocator("tcp://localhost:61617");
@ -91,8 +85,8 @@ ClientSessionFactory sessionFactory = locator.createClientSessionFactory();
ClientSession session = sessionFactory.createSession(...);
```
Similarly, if you're using JMS, you can configure the JMS connection
factory directly on the client side:
Similarly, if you're using JMS, you can configure the JMS connection factory
directly on the client side:
```java
ConnectionFactory connectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61617");
@ -103,8 +97,7 @@ Connection jmsConnection = connectionFactory.createConnection();
## Configuring the Netty transport
Out of the box, Apache ActiveMQ Artemis currently uses
[Netty](https://netty.io/), a high performance low level
network library.
[Netty](https://netty.io/), a high performance low level network library.
Our Netty transport can be configured in several different ways; to use
straightforward TCP sockets, SSL, or to tunnel over HTTP or HTTPS.
@ -113,14 +106,14 @@ We believe this caters for the vast majority of transport requirements.
### Single Port Support
Apache ActiveMQ Artemis supports using a single port for all
protocols, Apache ActiveMQ Artemis will automatically detect which protocol is being
used CORE, AMQP, STOMP or OPENWIRE and use the appropriate Apache ActiveMQ Artemis
handler. It will also detect whether protocols such as HTTP or Web
Sockets are being used and also use the appropriate decoders.
Apache ActiveMQ Artemis supports using a single port for all protocols, Apache
ActiveMQ Artemis will automatically detect which protocol is being used CORE,
AMQP, STOMP or OPENWIRE and use the appropriate Apache ActiveMQ Artemis
handler. It will also detect whether protocols such as HTTP or Web Sockets are
being used and also use the appropriate decoders.
It is possible to limit which protocols are supported by using the
`protocols` parameter on the Acceptor like so:
It is possible to limit which protocols are supported by using the `protocols`
parameter on the Acceptor like so:
```xml
<acceptor name="netty">tcp://localhost:61617?protocols=CORE,AMQP</acceptor>
@ -129,348 +122,344 @@ It is possible to limit which protocols are supported by using the
### Configuring Netty TCP
Netty TCP is a simple unencrypted TCP sockets based transport. If you're
running connections across an untrusted network please bear in
mind this transport is unencrypted. You may want to look at the SSL or
HTTPS configurations.
running connections across an untrusted network please bear in mind this
transport is unencrypted. You may want to look at the SSL or HTTPS
configurations.
With the Netty TCP transport all connections are initiated from the
client side (i.e. the server does not initiate any connections to the
client). This works well with firewall policies that typically only allow
connections to be initiated in one direction.
With the Netty TCP transport all connections are initiated from the client side
(i.e. the server does not initiate any connections to the client). This works
well with firewall policies that typically only allow connections to be
initiated in one direction.
All the valid keys for the `tcp` URL scheme used for Netty are defined in the
class `org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants`.
Most parameters can be used either with acceptors or connectors, some only
work with acceptors. The following parameters can be used to configure
Netty for simple TCP:
class
`org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants`.
Most parameters can be used either with acceptors or connectors, some only work
with acceptors. The following parameters can be used to configure Netty for
simple TCP:
> **Note**
> **Note:**
>
> The `host` and `port` parameters are only used in the core API, in
> XML configuration these are set in the URI host and port.
> The `host` and `port` parameters are only used in the core API, in XML
> configuration these are set in the URI host and port.
- `host`. This specifies the host name or IP address to connect to
(when configuring a connector) or to listen on (when configuring an
acceptor). The default value for this property is `localhost`. When
configuring acceptors, multiple hosts or IP addresses can be
specified by separating them with commas. It is also possible to
specify `0.0.0.0` to accept connection from all the host's network
interfaces. It's not valid to specify multiple addresses when
specifying the host for a connector; a connector makes a connection
to one specific address.
- `host`. This specifies the host name or IP address to connect to (when
configuring a connector) or to listen on (when configuring an acceptor). The
default value for this property is `localhost`. When configuring acceptors,
multiple hosts or IP addresses can be specified by separating them with commas.
It is also possible to specify `0.0.0.0` to accept connection from all the
host's network interfaces. It's not valid to specify multiple addresses when
specifying the host for a connector; a connector makes a connection to one
specific address.
> **Note**
>
> Don't forget to specify a host name or IP address! If you want
> your server able to accept connections from other nodes you must
> specify a hostname or IP address at which the acceptor will bind
> and listen for incoming connections. The default is localhost
> which of course is not accessible from remote nodes!
> **Note:**
>
> Don't forget to specify a host name or IP address! If you want your server
> able to accept connections from other nodes you must specify a hostname or
> IP address at which the acceptor will bind and listen for incoming
> connections. The default is localhost which of course is not accessible
> from remote nodes!
- `port`. This specifies the port to connect to (when configuring a
connector) or to listen on (when configuring an acceptor). The
default value for this property is `61616`.
- `port`. This specifies the port to connect to (when configuring a connector)
or to listen on (when configuring an acceptor). The default value for this
property is `61616`.
- `tcpNoDelay`. If this is `true` then [Nagle's
algorithm](https://en.wikipedia.org/wiki/Nagle%27s_algorithm) will be
disabled. This is a [Java (client) socket
option](https://docs.oracle.com/javase/8/docs/technotes/guides/net/socketOpt.html).
The default value for this property is `true`.
- `tcpNoDelay`. If this is `true` then [Nagle's
algorithm](https://en.wikipedia.org/wiki/Nagle%27s_algorithm) will be
disabled. This is a [Java (client) socket
option](https://docs.oracle.com/javase/8/docs/technotes/guides/net/socketOpt.html).
The default value for this property is `true`.
- `tcpSendBufferSize`. This parameter determines the size of the
TCP send buffer in bytes. The default value for this property is
`32768` bytes (32KiB).
- `tcpSendBufferSize`. This parameter determines the size of the TCP send
buffer in bytes. The default value for this property is `32768` bytes
(32KiB).
TCP buffer sizes should be tuned according to the bandwidth and
latency of your network. Here's a good link that explains the theory
behind [this](http://www-didc.lbl.gov/TCP-tuning/).
TCP buffer sizes should be tuned according to the bandwidth and latency of
your network. Here's a good link that explains the theory behind
[this](http://www-didc.lbl.gov/TCP-tuning/).
In summary TCP send/receive buffer sizes should be calculated as:
In summary TCP send/receive buffer sizes should be calculated as:
buffer_size = bandwidth * RTT.
buffer_size = bandwidth * RTT.
Where bandwidth is in *bytes per second* and network round trip time
(RTT) is in seconds. RTT can be easily measured using the `ping`
utility.
Where bandwidth is in *bytes per second* and network round trip time (RTT) is
in seconds. RTT can be easily measured using the `ping` utility.
For fast networks you may want to increase the buffer sizes from the
defaults.
For fast networks you may want to increase the buffer sizes from the
defaults.
- `tcpReceiveBufferSize`. This parameter determines the size of the
TCP receive buffer in bytes. The default value for this property is
`32768` bytes (32KiB).
- `tcpReceiveBufferSize`. This parameter determines the size of the TCP receive
buffer in bytes. The default value for this property is `32768` bytes
(32KiB).
- `writeBufferLowWaterMark`. This parameter determines the low water mark of
the Netty write buffer. Once the number of bytes queued in the write buffer exceeded
the high water mark and then dropped down below this value, Netty's channel
will start to be writable again. The default value for this property is
`32768` bytes (32KiB).
- `writeBufferLowWaterMark`. This parameter determines the low water mark of
the Netty write buffer. Once the number of bytes queued in the write buffer
exceeded the high water mark and then dropped down below this value, Netty's
channel will start to be writable again. The default value for this property is
`32768` bytes (32KiB).
- `writeBufferHighWaterMark`. This parameter determines the high water mark of
the Netty write buffer. If the number of bytes queued in the write buffer exceeds
this value, Netty's channel will start to be not writable. The default value for
this property is `131072` bytes (128KiB).
- `writeBufferHighWaterMark`. This parameter determines the high water mark of
the Netty write buffer. If the number of bytes queued in the write buffer
exceeds this value, Netty's channel will start to be not writable. The default
value for this property is `131072` bytes (128KiB).
- `batchDelay`. Before writing packets to the transport, Apache ActiveMQ Artemis can
be configured to batch up writes for a maximum of `batchDelay`
milliseconds. This can increase overall throughput for very small
messages. It does so at the expense of an increase in average
latency for message transfer. The default value for this property is
`0` ms.
- `batchDelay`. Before writing packets to the transport, Apache ActiveMQ
Artemis can be configured to batch up writes for a maximum of `batchDelay`
milliseconds. This can increase overall throughput for very small messages. It
does so at the expense of an increase in average latency for message transfer.
The default value for this property is `0` ms.
- `directDeliver`. When a message arrives on the server and is
delivered to waiting consumers, by default, the delivery is done on
the same thread as that on which the message arrived. This gives
good latency in environments with relatively small messages and a
small number of consumers, but at the cost of overall throughput and
scalability - especially on multi-core machines. If you want the
lowest latency and a possible reduction in throughput then you can
use the default value for `directDeliver` (i.e. `true`). If you are
willing to take some small extra hit on latency but want the highest
throughput set `directDeliver` to `false`.
- `directDeliver`. When a message arrives on the server and is delivered to
waiting consumers, by default, the delivery is done on the same thread as
that on which the message arrived. This gives good latency in environments with
relatively small messages and a small number of consumers, but at the cost of
overall throughput and scalability - especially on multi-core machines. If you
want the lowest latency and a possible reduction in throughput then you can use
the default value for `directDeliver` (i.e. `true`). If you are willing to take
some small extra hit on latency but want the highest throughput set
`directDeliver` to `false`.
- `nioRemotingThreads` This is deprecated. It is replaced by `remotingThreads`,
if you are using this please update your configuration.
- `nioRemotingThreads` This is deprecated. It is replaced by `remotingThreads`,
if you are using this please update your configuration.
- `remotingThreads`. Apache ActiveMQ Artemis will,
by default, use a number of threads equal to three times the number
of cores (or hyper-threads) as reported by
`Runtime.getRuntime().availableProcessors()` for processing incoming
packets. If you want to override this value, you can set the number
of threads by specifying this parameter. The default value for this
parameter is `-1` which means use the value from
`Runtime.getRuntime().availableProcessors()` \* 3.
- `remotingThreads`. Apache ActiveMQ Artemis will, by default, use a number of
threads equal to three times the number of cores (or hyper-threads) as
reported by `Runtime.getRuntime().availableProcessors()` for processing
incoming packets. If you want to override this value, you can set the number of
threads by specifying this parameter. The default value for this parameter is
`-1` which means use the value from
`Runtime.getRuntime().availableProcessors()` \* 3.
- `localAddress`. When configured a Netty Connector it is possible to
specify which local address the client will use when connecting to
the remote address. This is typically used in the Application Server
or when running Embedded to control which address is used for
outbound connections. If the local-address is not set then the
connector will use any local address available
- `localAddress`. When configured a Netty Connector it is possible to specify
which local address the client will use when connecting to the remote
address. This is typically used in the Application Server or when running
Embedded to control which address is used for outbound connections. If the
local-address is not set then the connector will use any local address
available
- `localPort`. When configured a Netty Connector it is possible to
specify which local port the client will use when connecting to the
remote address. This is typically used in the Application Server or
when running Embedded to control which port is used for outbound
connections. If the local-port default is used, which is 0, then the
connector will let the system pick up an ephemeral port. Valid ports
are 0 to 65535.
- `localPort`. When configured a Netty Connector it is possible to specify
which local port the client will use when connecting to the remote address.
This is typically used in the Application Server or when running Embedded to
control which port is used for outbound connections. If the local-port default
is used, which is 0, then the connector will let the system pick up an
ephemeral port. valid ports are 0 to 65535
- `connectionsAllowed`. This is only valid for acceptors. It limits the
number of connections which the acceptor will allow. When this limit
is reached a DEBUG level message is issued to the log, and the connection
is refused. The type of client in use will determine what happens when
the connection is refused. In the case of a `core` client, it will
result in a `org.apache.activemq.artemis.api.core.ActiveMQConnectionTimedOutException`.
- `connectionsAllowed`. This is only valid for acceptors. It limits the number
of connections which the acceptor will allow. When this limit is reached a
DEBUG level message is issued to the log, and the connection is refused. The
type of client in use will determine what happens when the connection is
refused. In the case of a `core` client, it will result in a
`org.apache.activemq.artemis.api.core.ActiveMQConnectionTimedOutException`.
- `handshake-timeout`. Prevents an unauthorised client opening a large
number of connections and just keeping them open. As connections each
require a file handle this consumes resources that are then unavailable
to other clients. Once the connection is authenticated, the usual rules
can be enforced regarding resource consumption. Default value is set to
10 seconds. Any integer is a valid value. Setting the value to zero or a
negative integer turns this feature off. Changing the value requires a
server restart to take effect.
- `handshake-timeout`. Prevents an unauthorised client opening a large number
of connections and just keeping them open. As connections each require a file
handle this consumes resources that are then unavailable to other clients. Once
the connection is authenticated, the usual rules can be enforced regarding
resource consumption. The default value is 10 seconds. Any integer is a valid
value. Setting the value to zero or a negative integer turns this feature off.
Changing the value requires a server restart to take effect.
### Configuring Netty Native Transport
Netty Native Transport support exists for selected OS platforms.
This allows Apache ActiveMQ Artemis to use native sockets/io instead of Java NIO.
Netty Native Transport support exists for selected OS platforms. This allows
Apache ActiveMQ Artemis to use native sockets/io instead of Java NIO.
These Native transports add features specific to a particular platform,
generate less garbage, and generally improve performance when compared to Java NIO based transport.
These Native transports add features specific to a particular platform,
generate less garbage, and generally improve performance when compared to Java
NIO based transport.
Both Clients and Server can benefit from this.
Current Supported Platforms.
- Linux running 64bit JVM
- MacOS running 64bit JVM
- Linux running 64bit JVM
- MacOS running 64bit JVM
Apache ActiveMQ Artemis will by default enable the corresponding native transport if a supported platform is detected.
Apache ActiveMQ Artemis will by default enable the corresponding native
transport if a supported platform is detected.
If running on an unsupported platform or any issues loading native libs, Apache ActiveMQ Artemis will fallback onto Java NIO.
If running on an unsupported platform or any issues loading native libs, Apache
ActiveMQ Artemis will fallback onto Java NIO.
#### Linux Native Transport
On supported Linux platforms Epoll is used, @see https://en.wikipedia.org/wiki/Epoll.
On supported Linux platforms Epoll is used, @see
https://en.wikipedia.org/wiki/Epoll.
The following properties are specific to this native transport:
- `useEpoll` enables the use of epoll if a supported Linux platform running a 64bit JVM is detected.
Setting this to `false` will force the use of Java NIO instead of epoll. Default is `true`
- `useEpoll` enables the use of epoll if a supported Linux platform running
  a 64bit JVM is detected. Setting this to `false` will force the use of Java
  NIO instead of epoll. Default is `true`
#### MacOS Native Transport
On supported MacOS platforms KQueue is used, @see https://en.wikipedia.org/wiki/Kqueue.
On supported MacOS platforms KQueue is used, @see
https://en.wikipedia.org/wiki/Kqueue.
The following properties are specific to this native transport:
- `useKQueue` enables the use of kqueue if a supported MacOS platform running a 64bit JVM is detected.
Setting this to `false` will force the use of Java NIO instead of kqueue. Default is `true`
- `useKQueue` enables the use of kqueue if a supported MacOS platform running a
64bit JVM is detected. Setting this to `false` will force the use of Java
NIO instead of kqueue. Default is `true`
### Configuring Netty SSL
Netty SSL is similar to the Netty TCP transport but it provides
additional security by encrypting TCP connections using the Secure
Sockets Layer (SSL).
Netty SSL is similar to the Netty TCP transport but it provides additional
security by encrypting TCP connections using the Secure Sockets Layer (SSL).
Please see the examples for a full working example of using Netty SSL.
Netty SSL uses all the same properties as Netty TCP but adds the
following additional properties:
Netty SSL uses all the same properties as Netty TCP but adds the following
additional properties:
- `sslEnabled`
- `sslEnabled`
Must be `true` to enable SSL. Default is `false`.
Must be `true` to enable SSL. Default is `false`.
- `keyStorePath`
- `keyStorePath`
When used on an `acceptor` this is the path to the SSL key store on
the server which holds the server's certificates (whether
self-signed or signed by an authority).
When used on an `acceptor` this is the path to the SSL key store on the
server which holds the server's certificates (whether self-signed or signed by
an authority).
When used on a `connector` this is the path to the client-side SSL
key store which holds the client certificates. This is only relevant
for a `connector` if you are using 2-way SSL (i.e. mutual
authentication). Although this value is configured on the server, it
is downloaded and used by the client. If the client needs to use a
different path from that set on the server then it can override the
server-side setting by either using the customary
"javax.net.ssl.keyStore" system property or the ActiveMQ-specific
"org.apache.activemq.ssl.keyStore" system property. The
ActiveMQ-specific system property is useful if another component on
client is already making use of the standard, Java system property.
When used on a `connector` this is the path to the client-side SSL key store
which holds the client certificates. This is only relevant for a `connector` if
you are using 2-way SSL (i.e. mutual authentication). Although this value is
configured on the server, it is downloaded and used by the client. If the
client needs to use a different path from that set on the server then it can
override the server-side setting by either using the customary
"javax.net.ssl.keyStore" system property or the ActiveMQ-specific
"org.apache.activemq.ssl.keyStore" system property. The ActiveMQ-specific
system property is useful if another component on client is already making use
of the standard, Java system property.
- `keyStorePassword`
- `keyStorePassword`
When used on an `acceptor` this is the password for the server-side
keystore.
When used on an `acceptor` this is the password for the server-side keystore.
When used on a `connector` this is the password for the client-side
keystore. This is only relevant for a `connector` if you are using
2-way SSL (i.e. mutual authentication). Although this value can be
configured on the server, it is downloaded and used by the client.
If the client needs to use a different password from that set on the
server then it can override the server-side setting by either using
the customary "javax.net.ssl.keyStorePassword" system property or
the ActiveMQ-specific "org.apache.activemq.ssl.keyStorePassword"
system property. The ActiveMQ-specific system property is useful if
another component on client is already making use of the standard,
Java system property.
When used on a `connector` this is the password for the client-side keystore.
This is only relevant for a `connector` if you are using 2-way SSL (i.e. mutual
authentication). Although this value can be configured on the server, it is
downloaded and used by the client. If the client needs to use a different
password from that set on the server then it can override the server-side
setting by either using the customary "javax.net.ssl.keyStorePassword" system
property or the ActiveMQ-specific "org.apache.activemq.ssl.keyStorePassword"
system property. The ActiveMQ-specific system property is useful if another
component on client is already making use of the standard, Java system
property.
- `trustStorePath`
- `trustStorePath`
When used on an `acceptor` this is the path to the server-side SSL
key store that holds the keys of all the clients that the server
trusts. This is only relevant for an `acceptor` if you are using
2-way SSL (i.e. mutual authentication).
When used on an `acceptor` this is the path to the server-side SSL key store
that holds the keys of all the clients that the server trusts. This is only
relevant for an `acceptor` if you are using 2-way SSL (i.e. mutual
authentication).
When used on a `connector` this is the path to the client-side SSL
key store which holds the public keys of all the servers that the
client trusts. Although this value can be configured on the server,
it is downloaded and used by the client. If the client needs to use
a different path from that set on the server then it can override
the server-side setting by either using the customary
"javax.net.ssl.trustStore" system property or the ActiveMQ-specific
"org.apache.activemq.ssl.trustStore" system property. The
ActiveMQ-specific system property is useful if another component on
client is already making use of the standard, Java system property.
When used on a `connector` this is the path to the client-side SSL key store
which holds the public keys of all the servers that the client trusts. Although
this value can be configured on the server, it is downloaded and used by the
client. If the client needs to use a different path from that set on the server
then it can override the server-side setting by either using the customary
"javax.net.ssl.trustStore" system property or the ActiveMQ-specific
"org.apache.activemq.ssl.trustStore" system property. The ActiveMQ-specific
system property is useful if another component on client is already making use
of the standard, Java system property.
- `trustStorePassword`
- `trustStorePassword`
When used on an `acceptor` this is the password for the server-side
trust store. This is only relevant for an `acceptor` if you are
using 2-way SSL (i.e. mutual authentication).
When used on an `acceptor` this is the password for the server-side trust
store. This is only relevant for an `acceptor` if you are using 2-way SSL (i.e.
mutual authentication).
When used on a `connector` this is the password for the client-side
truststore. Although this value can be configured on the server, it
is downloaded and used by the client. If the client needs to use a
different password from that set on the server then it can override
the server-side setting by either using the customary
"javax.net.ssl.trustStorePassword" system property or the
ActiveMQ-specific "org.apache.activemq.ssl.trustStorePassword"
system property. The ActiveMQ-specific system property is useful if
another component on client is already making use of the standard,
Java system property.
When used on a `connector` this is the password for the client-side
truststore. Although this value can be configured on the server, it is
downloaded and used by the client. If the client needs to use a different
password from that set on the server then it can override the server-side
setting by either using the customary "javax.net.ssl.trustStorePassword" system
property or the ActiveMQ-specific "org.apache.activemq.ssl.trustStorePassword"
system property. The ActiveMQ-specific system property is useful if another
component on client is already making use of the standard, Java system
property.
- `enabledCipherSuites`
- `enabledCipherSuites`
Whether used on an `acceptor` or `connector` this is a comma
separated list of cipher suites used for SSL communication. The
default value is `null` which means the JVM's default will be used.
Whether used on an `acceptor` or `connector` this is a comma separated list
of cipher suites used for SSL communication. The default value is `null` which
means the JVM's default will be used.
- `enabledProtocols`
- `enabledProtocols`
Whether used on an `acceptor` or `connector` this is a comma
separated list of protocols used for SSL communication. The default
value is `null` which means the JVM's default will be used.
Whether used on an `acceptor` or `connector` this is a comma separated list
of protocols used for SSL communication. The default value is `null` which
means the JVM's default will be used.
- `needClientAuth`
- `needClientAuth`
This property is only for an `acceptor`. It tells a client
connecting to this acceptor that 2-way SSL is required. Valid values
are `true` or `false`. Default is `false`.
This property is only for an `acceptor`. It tells a client connecting to this
acceptor that 2-way SSL is required. Valid values are `true` or `false`.
Default is `false`.
Note that this property takes precedence over `wantClientAuth` and if
its value is set to true then `wantClientAuth` will be ignored.
**Note:** This property takes precedence over `wantClientAuth` and if its
value is set to true then `wantClientAuth` will be ignored.
- `wantClientAuth`
- `wantClientAuth`
This property is only for an `acceptor`. It tells a client
connecting to this acceptor that 2-way SSL is requested but not required.
Valid values are `true` or `false`. Default is `false`.
This property is only for an `acceptor`. It tells a client connecting to this
acceptor that 2-way SSL is requested but not required. Valid values are `true`
or `false`. Default is `false`.
Note that if the property `needClientAuth` is set to true then that
property will take precedence and this property will be ignored.
**Note:** If the property `needClientAuth` is set to `true` then that
property will take precedence and this property will be ignored.
- `verifyHost`
- `verifyHost`
When used on an `acceptor` the `CN` of the connecting client's SSL certificate
will be compared to its hostname to verify they match. This is useful
only for 2-way SSL.
When used on an `acceptor` the `CN` of the connecting client's SSL
certificate will be compared to its hostname to verify they match. This is
useful only for 2-way SSL.
When used on a `connector` the `CN` of the server's SSL certificate will be
compared to its hostname to verify they match. This is useful for both 1-way
and 2-way SSL.
When used on a `connector` the `CN` of the server's SSL certificate will be
compared to its hostname to verify they match. This is useful for both 1-way
and 2-way SSL.
Valid values are `true` or `false`. Default is `false`.
Valid values are `true` or `false`. Default is `false`.
- `trustAll`
- `trustAll`
When used on a `connector` the client will trust the provided server certificate
implicitly, regardless of any configured trust store. **Warning:** This setting is
primarily for testing purposes only and should not be used in production.
When used on a `connector` the client will trust the provided server
certificate implicitly, regardless of any configured trust store. **Warning:**
This setting is primarily for testing purposes only and should not be used in
production.
Valid values are `true` or `false`. Default is `false`.
Valid values are `true` or `false`. Default is `false`.
- `forceSSLParameters`
- `forceSSLParameters`
When used on a `connector` any SSL settings that are set as parameters on the connector will
be used instead of JVM system properties including both javax.net.ssl and ActiveMQ system properties
to configure the SSL context for this connector.
When used on a `connector` any SSL settings that are set as parameters on the
connector will be used instead of JVM system properties including both
javax.net.ssl and ActiveMQ system properties to configure the SSL context for
this connector.
Valid values are `true` or `false`. Default is `false`.
Valid values are `true` or `false`. Default is `false`.
- `useDefaultSslContext`
- `useDefaultSslContext`
Only valid on a `connector`. Allows the `connector` to use the "default" SSL
context (via `SSLContext.getDefault()`) which can be set programmatically by
the client (via `SSLContext.setDefault(SSLContext)`). If set to `true` all
other SSL related parameters except for `sslEnabled` are ignored.
Only valid on a `connector`. Allows the `connector` to use the "default" SSL
context (via `SSLContext.getDefault()`) which can be set programmatically by
the client (via `SSLContext.setDefault(SSLContext)`). If set to `true` all
other SSL related parameters except for `sslEnabled` are ignored.
Valid values are `true` or `false`. Default is `false`.
Valid values are `true` or `false`. Default is `false`.
- `sslProvider`
Used to change the SSL Provider between `JDK` and `OPENSSL`. The default is `JDK`.
If used with `OPENSSL` you can add `netty-tcnative` to your classpath to use the native
installed openssl. This can be useful if you want to use special ciphersuite - elliptic curve combinations
which are supported through openssl but not through the JDK provider. See https://en.wikipedia.org/wiki/Comparison_of_TLS_implementations
for more information.
- `sslProvider`
Used to change the SSL Provider between `JDK` and `OPENSSL`. The default is
`JDK`. If used with `OPENSSL` you can add `netty-tcnative` to your classpath
to use the native installed openssl. This can be useful if you want to use
special ciphersuite - elliptic curve combinations which are supported through
openssl but not through the JDK provider. See
https://en.wikipedia.org/wiki/Comparison_of_TLS_implementations for more
information.
### Configuring Netty HTTP
Netty HTTP tunnels packets over the HTTP protocol. It can be useful in
@ -481,22 +470,22 @@ Please see the examples for a full working example of using Netty HTTP.
Netty HTTP uses the same properties as Netty TCP but adds the following
additional properties:
- `httpEnabled`. This is now no longer needed. With single port support
Apache ActiveMQ Artemis will now automatically detect if http is being
used and configure itself.
- `httpEnabled`. This is now no longer needed. With single port support Apache
ActiveMQ Artemis will now automatically detect if http is being used and
configure itself.
- `httpClientIdleTime`. How long a client can be idle before
sending an empty http request to keep the connection alive
- `httpClientIdleTime`. How long a client can be idle before sending an empty
http request to keep the connection alive
- `httpClientIdleScanPeriod`. How often, in milliseconds, to scan
for idle clients
- `httpClientIdleScanPeriod`. How often, in milliseconds, to scan for idle
clients
- `httpResponseTime`. How long the server can wait before sending an
empty http response to keep the connection alive
- `httpResponseTime`. How long the server can wait before sending an empty http
response to keep the connection alive
- `httpServerScanPeriod`. How often, in milliseconds, to scan for
clients needing responses
- `httpServerScanPeriod`. How often, in milliseconds, to scan for clients
needing responses
- `httpRequiresSessionId`. If `true` the client will wait after the
first call to receive a session id. Used when the http connector is
connecting to a servlet acceptor (not recommended)
- `httpRequiresSessionId`. If `true` the client will wait after the first call
to receive a session id. Used when the http connector is connecting to a
servlet acceptor (not recommended)

View File

@ -18,8 +18,7 @@ ServerLocator locator = null;
ClientSessionFactory sf = null;
ClientSession session = null;
try
{
try {
locator = ActiveMQClient.createServerLocatorWithoutHA(..);
sf = locator.createClientSessionFactory();
@ -27,21 +26,16 @@ try
session = sf.createSession(...);
... do some stuff with the session...
}
finally
{
if (session != null)
{
} finally {
if (session != null) {
session.close();
}
if (sf != null)
{
if (sf != null) {
sf.close();
}
if(locator != null)
{
if(locator != null) {
locator.close();
}
}
@ -52,18 +46,14 @@ And here's an example of a well behaved JMS client application:
```java
Connection jmsConnection = null;
try
{
try {
ConnectionFactory jmsConnectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61616");
jmsConnection = jmsConnectionFactory.createConnection();
... do some stuff with the connection...
}
finally
{
if (jmsConnection != null)
{
} finally {
if (jmsConnection != null) {
jmsConnection.close();
}
}
@ -73,17 +63,13 @@ finally
Or by using the auto-closeable feature from Java, which can save a few lines of code:
```java
try (
ActiveMQConnectionFactory jmsConnectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61616");
Connection jmsConnection = jmsConnectionFactory.createConnection())
{
Connection jmsConnection = jmsConnectionFactory.createConnection()) {
... do some stuff with the connection...
}
```
Unfortunately users don't always write well behaved applications, and
sometimes clients just crash so they don't have a chance to clean up
their resources!
@ -186,17 +172,17 @@ from a thread pool so that the remoting thread is not tied up for too
long. Please note that processing operations asynchronously on another
thread adds a little more latency. These packets are:
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.RollbackMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.RollbackMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionCloseMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionCloseMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionCommitMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionCommitMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXACommitMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXACommitMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXAPrepareMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXAPrepareMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXARollbackMessage`
- `org.apache.activemq.artemis.core.protocol.core.impl.wireformat.SessionXARollbackMessage`
To disable asynchronous connection execution, set the parameter
`async-connection-execution-enabled` in `broker.xml` to

View File

@ -1,45 +1,46 @@
# Core Bridges
The function of a bridge is to consume messages from a source queue, and
forward them to a target address, typically on a different Apache ActiveMQ Artemis
server.
forward them to a target address, typically on a different Apache ActiveMQ
Artemis server.
The source and target servers do not have to be in the same cluster
which makes bridging suitable for reliably sending messages from one
cluster to another, for instance across a WAN, or internet and where the
connection may be unreliable.
The source and target servers do not have to be in the same cluster which makes
bridging suitable for reliably sending messages from one cluster to another,
for instance across a WAN, or internet and where the connection may be
unreliable.
The bridge has built in resilience to failure so if the target server
connection is lost, e.g. due to network failure, the bridge will retry
connecting to the target until it comes back online. When it comes back
online it will resume operation as normal.
connecting to the target until it comes back online. When it comes back online
it will resume operation as normal.
In summary, bridges are a way to reliably connect two separate Apache ActiveMQ Artemis
servers together. With a core bridge both source and target servers must
be Apache ActiveMQ Artemis servers.
In summary, bridges are a way to reliably connect two separate Apache ActiveMQ
Artemis servers together. With a core bridge both source and target servers
must be Apache ActiveMQ Artemis servers.
Bridges can be configured to provide *once and only once* delivery
guarantees even in the event of the failure of the source or the target
server. They do this by using duplicate detection (described in [Duplicate Detection](duplicate-detection.md)).
Bridges can be configured to provide *once and only once* delivery guarantees
even in the event of the failure of the source or the target server. They do
this by using duplicate detection (described in [Duplicate
Detection](duplicate-detection.md)).
> **Note**
> **Note:**
>
> Although they have similar function, don't confuse core bridges with
> JMS bridges!
> Although they have similar function, don't confuse core bridges with JMS
> bridges!
>
> Core bridges are for linking an Apache ActiveMQ Artemis node with another Apache ActiveMQ Artemis
> node and do not use the JMS API. A JMS Bridge is used for linking any
> two JMS 1.1 compliant JMS providers. So, a JMS Bridge could be used
> for bridging to or from different JMS compliant messaging system. It's
> always preferable to use a core bridge if you can. Core bridges use
> duplicate detection to provide *once and only once* guarantees. To
> provide the same guarantee using a JMS bridge you would have to use XA
> which has a higher overhead and is more complex to configure.
> Core bridges are for linking an Apache ActiveMQ Artemis node with another
> Apache ActiveMQ Artemis node and do not use the JMS API. A JMS Bridge is used
> for linking any two JMS 1.1 compliant JMS providers. So, a JMS Bridge could
> be used for bridging to or from different JMS compliant messaging system.
> It's always preferable to use a core bridge if you can. Core bridges use
> duplicate detection to provide *once and only once* guarantees. To provide
> the same guarantee using a JMS bridge you would have to use XA which has a
> higher overhead and is more complex to configure.
## Configuring Bridges
Bridges are configured in `broker.xml`. Let's kick off
with an example (this is actually from the bridge example):
Bridges are configured in `broker.xml`. Let's kick off with an example (this is
actually from the bridge example):
```xml
<bridge name="my-bridge">
@ -68,154 +69,144 @@ with an example (this is actually from the bridge example):
</bridge>
```
In the above example we have shown all the parameters it's possible to
configure for a bridge. In practice you might use many of the defaults
so it won't be necessary to specify them all explicitly.
In the above example we have shown all the parameters it's possible to configure
for a bridge. In practice you might use many of the defaults so it won't be
necessary to specify them all explicitly.
Let's take a look at all the parameters in turn:
- `name` attribute. All bridges must have a unique name in the server.
- `name` attribute. All bridges must have a unique name in the server.
- `queue-name`. This is the unique name of the local queue that the
bridge consumes from, it's a mandatory parameter.
- `queue-name`. This is the unique name of the local queue that the bridge
consumes from, it's a mandatory parameter.
The queue must already exist by the time the bridge is instantiated
at start-up.
The queue must already exist by the time the bridge is instantiated at
start-up.
- `forwarding-address`. This is the address on the target server that
the message will be forwarded to. If a forwarding address is not
specified, then the original address of the message will be
retained.
- `forwarding-address`. This is the address on the target server that the
message will be forwarded to. If a forwarding address is not specified, then
the original address of the message will be retained.
- `filter-string`. An optional filter string can be supplied. If
specified then only messages which match the filter expression
specified in the filter string will be forwarded. The filter string
follows the ActiveMQ Artemis filter expression syntax described in [Filter Expressions](filter-expressions.md).
- `filter-string`. An optional filter string can be supplied. If specified then
only messages which match the filter expression specified in the filter
string will be forwarded. The filter string follows the ActiveMQ Artemis filter
expression syntax described in [Filter Expressions](filter-expressions.md).
- `transformer-class-name`. An optional transformer-class-name can be
specified. This is the name of a user-defined class which implements
the `org.apache.activemq.artemis.core.server.transformer.Transformer` interface.
- `transformer-class-name`. An optional transformer-class-name can be
specified. This is the name of a user-defined class which implements the
`org.apache.activemq.artemis.core.server.transformer.Transformer` interface.
If this is specified then the transformer's `transform()` method
will be invoked with the message before it is forwarded. This gives
you the opportunity to transform the message's header or body before
forwarding it.
If this is specified then the transformer's `transform()` method will be
invoked with the message before it is forwarded. This gives you the opportunity
to transform the message's header or body before forwarding it.
- `ha`. This optional parameter determines whether or not this bridge
should support high availability. True means it will connect to any
available server in a cluster and support failover. The default
value is `false`.
- `ha`. This optional parameter determines whether or not this bridge should
support high availability. True means it will connect to any available server
in a cluster and support failover. The default value is `false`.
- `retry-interval`. This optional parameter determines the period in
milliseconds between subsequent reconnection attempts, if the
connection to the target server has failed. The default value is
`2000` milliseconds.
- `retry-interval`. This optional parameter determines the period in
milliseconds between subsequent reconnection attempts, if the connection to
the target server has failed. The default value is `2000` milliseconds.
- `retry-interval-multiplier`. This optional parameter determines
a multiplier to apply to the time since the last retry to
compute the time to the next retry.
- `retry-interval-multiplier`. This optional parameter determines a
multiplier to apply to the time since the last retry to compute the time to
the next retry.
This allows you to implement an *exponential backoff* between retry
attempts.
This allows you to implement an *exponential backoff* between retry
attempts.
Let's take an example:
Let's take an example:
If we set `retry-interval` to `1000` ms and we set
`retry-interval-multiplier` to `2.0`, then, if the first reconnect
attempt fails, we will wait `1000` ms then `2000` ms then `4000` ms
between subsequent reconnection attempts.
If we set `retry-interval` to `1000` ms and we set `retry-interval-multiplier`
to `2.0`, then, if the first reconnect attempt fails, we will wait `1000` ms
then `2000` ms then `4000` ms between subsequent reconnection attempts.
The default value is `1.0` meaning each reconnect attempt is spaced
at equal intervals.
The default value is `1.0` meaning each reconnect attempt is spaced at equal
intervals.
- `initial-connect-attempts`. This optional parameter determines the
total number of initial connect attempts the bridge will make before
giving up and shutting down. A value of `-1` signifies an unlimited
number of attempts. The default value is `-1`.
- `initial-connect-attempts`. This optional parameter determines the total
number of initial connect attempts the bridge will make before giving up and
shutting down. A value of `-1` signifies an unlimited number of attempts. The
default value is `-1`.
- `reconnect-attempts`. This optional parameter determines the total
number of reconnect attempts the bridge will make before giving up
and shutting down. A value of `-1` signifies an unlimited number of
attempts. The default value is `-1`.
- `reconnect-attempts`. This optional parameter determines the total number of
reconnect attempts the bridge will make before giving up and shutting down. A
value of `-1` signifies an unlimited number of attempts. The default value is
`-1`.
- `failover-on-server-shutdown`. This optional parameter determines
whether the bridge will attempt to failover onto a backup server (if
specified) when the target server is cleanly shutdown rather than
crashed.
- `failover-on-server-shutdown`. This optional parameter determines whether the
bridge will attempt to failover onto a backup server (if specified) when the
target server is cleanly shutdown rather than crashed.
The bridge connector can specify both a live and a backup server, if
it specifies a backup server and this parameter is set to `true`
then if the target server is *cleanly* shutdown the bridge
connection will attempt to failover onto its backup. If the bridge
connector has no backup server configured then this parameter has no
effect.
The bridge connector can specify both a live and a backup server, if it
specifies a backup server and this parameter is set to `true` then if the
target server is *cleanly* shutdown the bridge connection will attempt to
failover onto its backup. If the bridge connector has no backup server
configured then this parameter has no effect.
Sometimes you want a bridge configured with a live and a backup
target server, but you don't want to failover to the backup if the
live server is simply taken down temporarily for maintenance, this
is when this parameter comes in handy.
Sometimes you want a bridge configured with a live and a backup target
server, but you don't want to failover to the backup if the live server is
simply taken down temporarily for maintenance, this is when this parameter
comes in handy.
The default value for this parameter is `false`.
The default value for this parameter is `false`.
- `use-duplicate-detection`. This optional parameter determines
whether the bridge will automatically insert a duplicate id property
into each message that it forwards.
- `use-duplicate-detection`. This optional parameter determines whether the
bridge will automatically insert a duplicate id property into each message
that it forwards.
Doing so, allows the target server to perform duplicate detection on
messages it receives from the source server. If the connection fails
or server crashes, then, when the bridge resumes it will resend
unacknowledged messages. This might result in duplicate messages
being sent to the target server. Enabling duplicate detection
allows these duplicates to be screened out and ignored.
Doing so, allows the target server to perform duplicate detection on messages
it receives from the source server. If the connection fails or server crashes,
then, when the bridge resumes it will resend unacknowledged messages. This
might result in duplicate messages being sent to the target server. Enabling
duplicate detection allows these duplicates to be screened out and ignored.
This allows the bridge to provide a *once and only once* delivery
guarantee without using heavyweight methods such as XA (see [Duplicate Detection](duplicate-detection.md) for
more information).
This allows the bridge to provide a *once and only once* delivery guarantee
without using heavyweight methods such as XA (see [Duplicate
Detection](duplicate-detection.md) for more information).
The default value for this parameter is `true`.
The default value for this parameter is `true`.
- `confirmation-window-size`. This optional parameter determines the
`confirmation-window-size` to use for the connection used to forward
messages to the target node. This attribute is described in section
[Reconnection and Session Reattachment](client-reconnection.md)
- `confirmation-window-size`. This optional parameter determines the
`confirmation-window-size` to use for the connection used to forward messages
to the target node. This attribute is described in section [Reconnection and
Session Reattachment](client-reconnection.md)
> **Warning**
>
> When using the bridge to forward messages to an address which uses
> the `BLOCK` `address-full-policy` from a queue which has a
> `max-size-bytes` set it's important that
> `confirmation-window-size` is less than or equal to
> `max-size-bytes` to prevent the flow of messages from ceasing.
> **Warning**
>
> When using the bridge to forward messages to an address which uses the
> `BLOCK` `address-full-policy` from a queue which has a `max-size-bytes` set
> it's important that `confirmation-window-size` is less than or equal to
> `max-size-bytes` to prevent the flow of messages from ceasing.
- `producer-window-size`. This optional parameter determines the
producer flow control through the bridge. You usually leave this off
unless you are dealing with huge large messages.
- `producer-window-size`. This optional parameter determines the producer flow
control through the bridge. You usually leave this off unless you are dealing
with huge large messages.
Default=-1 (disabled)
Default=-1 (disabled)
- `user`. This optional parameter determines the user name to use when
creating the bridge connection to the remote server. If it is not
specified the default cluster user specified by `cluster-user` in
`broker.xml` will be used.
- `user`. This optional parameter determines the user name to use when creating
the bridge connection to the remote server. If it is not specified the
default cluster user specified by `cluster-user` in `broker.xml` will be used.
- `password`. This optional parameter determines the password to use
when creating the bridge connection to the remote server. If it is
not specified the default cluster password specified by
`cluster-password` in `broker.xml` will be used.
- `password`. This optional parameter determines the password to use when
creating the bridge connection to the remote server. If it is not specified
the default cluster password specified by `cluster-password` in `broker.xml`
will be used.
- `static-connectors` or `discovery-group-ref`. Pick either of these
options to connect the bridge to the target server.
- `static-connectors` or `discovery-group-ref`. Pick either of these options to
connect the bridge to the target server.
The `static-connectors` is a list of `connector-ref` elements
pointing to `connector` elements defined elsewhere. A *connector*
encapsulates knowledge of what transport to use (TCP, SSL, HTTP etc)
as well as the server connection parameters (host, port etc). For
more information about what connectors are and how to configure
them, please see [Configuring the Transport](configuring-transports.md).
The `static-connectors` is a list of `connector-ref` elements pointing to
`connector` elements defined elsewhere. A *connector* encapsulates knowledge of
what transport to use (TCP, SSL, HTTP etc) as well as the server connection
parameters (host, port etc). For more information about what connectors are and
how to configure them, please see [Configuring the
Transport](configuring-transports.md).
The `discovery-group-ref` element has one attribute -
`discovery-group-name`. This attribute points to a `discovery-group`
defined elsewhere. For more information about what discovery-groups
are and how to configure them, please see [Discovery Groups](clusters.md).
The `discovery-group-ref` element has one attribute - `discovery-group-name`.
This attribute points to a `discovery-group` defined elsewhere. For more
information about what discovery-groups are and how to configure them, please
see [Discovery Groups](clusters.md).

228
docs/user-manual/en/core.md Normal file
View File

@ -0,0 +1,228 @@
# Using Core
Apache ActiveMQ Artemis core is a messaging system with its own API. We call
this the *core API*.
If you don't want to use the JMS API or any of the other supported protocols
you can use the core API directly. The core API provides all the functionality
of JMS but without much of the complexity. It also provides features that are
not available using JMS.
## Core Messaging Concepts
Some of the core messaging concepts are similar to JMS concepts, but core
messaging concepts are also different in some ways as well. In general the core
API is simpler than the JMS API, since we remove distinctions between queues,
topics and subscriptions. We'll discuss each of the major core messaging
concepts in turn, but to see the API in detail please consult the Javadoc.
Also refer to the [addressing model](address-model.md) chapter for a high-level
overview of these concepts as well as configuration details.
### Message
- A message is the unit of data which is sent between clients and servers.
- A message has a body which is a buffer containing convenient methods for
reading and writing data into it.
- A message has a set of properties which are key-value pairs. Each property
key is a string and property values can be of type integer, long, short,
byte, byte[], String, double, float or boolean.
- A message has an *address* it is being sent to. When the message arrives on
the server it is routed to any queues that are bound to the address. The
routing semantics (i.e. anycast or multicast) are determined by the "routing
type" of the address and queue. If the queues are bound with any filter, the
message will only be routed to that queue if the filter matches. An address may
have many queues bound to it or even none. There may also be entities other
than queues (e.g. *diverts*) bound to addresses.
- Messages can be either durable or non durable. Durable messages in a durable
queue will survive a server crash or restart. Non durable messages will never
survive a server crash or restart.
- Messages can be specified with a priority value between 0 and 9. 0 represents
the lowest priority and 9 represents the highest. The broker will attempt to
deliver higher priority messages before lower priority ones.
- Messages can be specified with an optional expiry time. The broker will not
deliver messages after its expiry time has been exceeded.
- Messages also have an optional timestamp which represents the time the
message was sent.
- Apache ActiveMQ Artemis also supports the sending/consuming of very large
messages much larger than can fit in available RAM at any one time.
### Address
A server maintains a mapping between an address and a set of queues. Zero or
more queues can be bound to a single address. Each queue can be bound with an
optional message filter. When a message is routed, it is routed to the set of
queues bound to the message's address. If any of the queues are bound with a
filter expression, then the message will only be routed to the subset of bound
queues which match that filter expression.
Other entities, such as *diverts* can also be bound to an address and messages
will also be routed there.
> **Note:**
>
> Although core supports publish-subscribe semantics there is no such thing as
> a "topic" per se. "Topic" is mainly a JMS term. In core we just deal with
> *addresses*, *queues*, and *routing types*.
>
> For example, a JMS topic would be implemented by a single address to which
> many queues are bound using multicast routing. Each queue represents a
> "subscription" in normal "topic" terms. A JMS queue would be implemented as a
> single address to which one queue is bound using anycast routing - that queue
> represents the JMS queue.
### Queue
Queues can be durable, meaning the messages they contain survive a server crash
or restart, as long as the messages in them are durable. Non durable queues do
not survive a server restart or crash even if the messages they contain are
durable.
Queues can also be temporary, meaning they are automatically deleted when the
client connection is closed, if they are not explicitly deleted before that.
Queues can be bound with an optional filter expression. If a filter expression
is supplied then the server will only route messages that match that filter
expression to any queues bound to the address.
Many queues can be bound to a single address. A particular queue is only bound
to a maximum of one address.
### Routing Type
The routing type determines the semantics used when routing messages to the
queues bound to the address where the message was sent. Two types are
supported:
- `ANYCAST`
The message is routed to only **one** of the queues bound to the address. If
multiple queues are bound to the address then messages are routed to them in a
round-robin fashion.
- `MULTICAST`
The message is routed to **all** of the queues bound to the address.
## Core API
### ServerLocator
Clients use `ServerLocator` instances to create `ClientSessionFactory`
instances. `ServerLocator` instances are used to locate servers and create
connections to them.
In JMS terms think of a `ServerLocator` in the same way you would a JMS
Connection Factory.
`ServerLocator` instances are created using the `ActiveMQClient` factory class.
### ClientSessionFactory
Clients use `ClientSessionFactory` instances to create `ClientSession`
instances. `ClientSessionFactory` instances are basically the connection to a
server.
In JMS terms think of them as JMS Connections.
`ClientSessionFactory` instances are created using the `ServerLocator` class.
### ClientSession
A client uses a `ClientSession` for consuming and producing messages and for
grouping them in transactions. `ClientSession` instances can support both
transactional and non transactional semantics and also provide an `XAResource`
interface so messaging operations can be performed as part of a
[JTA](http://www.oracle.com/technetwork/java/javaee/tech/jta-138684.html)
transaction.
`ClientSession` instances group `ClientConsumer` instances and `ClientProducer`
instances.
`ClientSession` instances can be registered with an optional
`SendAcknowledgementHandler`. This allows your client code to be notified
asynchronously when sent messages have successfully reached the server. This
unique Apache ActiveMQ Artemis feature, allows you to have full guarantees that
sent messages have reached the server without having to block on each message
sent until a response is received. Blocking on each message sent is costly
since it requires a network round trip for each message sent. By not blocking
and receiving send acknowledgements asynchronously you can create true end to
end asynchronous systems which is not possible using the standard JMS API. For
more information on this advanced feature please see the section [Guarantees of
sends and commits](send-guarantees.md).
### ClientConsumer
Clients use `ClientConsumer` instances to consume messages from a queue. Core
messaging supports both synchronous and asynchronous message consumption
semantics. `ClientConsumer` instances can be configured with an optional filter
expression and will only consume messages which match that expression.
### ClientProducer
Clients create `ClientProducer` instances on `ClientSession` instances so they
can send messages. `ClientProducer` instances can specify an address to which
all sent messages are routed, or they can have no specified address, and the
address is specified at send time for the message.
> **Warning**
>
> Please note that `ClientSession`, `ClientProducer` and `ClientConsumer`
> instances are *designed to be re-used*.
>
> It's an anti-pattern to create new `ClientSession`, `ClientProducer` and
> `ClientConsumer` instances for each message you produce or consume. If you do
> this, your application will perform very poorly. This is discussed further
> in the section on performance tuning [Performance Tuning](perf-tuning.md).
## A simple example of using Core
Here's a very simple program using the core messaging API to send and receive a
message. Logically it's comprised of two sections: firstly setting up the
producer to write a message to an *address*, and secondly, creating a *queue*
for the consumer using anycast routing, creating the consumer, and *starting*
it.
```java
ServerLocator locator = ActiveMQClient.createServerLocator("vm://0");
// In this simple example, we just use one session for both producing and receiving
ClientSessionFactory factory = locator.createClientSessionFactory();
ClientSession session = factory.createSession();
// A producer is associated with an address ...
ClientProducer producer = session.createProducer("example");
ClientMessage message = session.createMessage(true);
message.getBodyBuffer().writeString("Hello");
// We need a queue attached to the address ...
session.createQueue("example", RoutingType.ANYCAST, "example", true);
// And a consumer attached to the queue ...
ClientConsumer consumer = session.createConsumer("example");
// Once we have a queue, we can send the message ...
producer.send(message);
// We need to start the session before we can -receive- messages ...
session.start();
ClientMessage msgReceived = consumer.receive();
System.out.println("message = " + msgReceived.getBodyBuffer().readString());
session.close();
```

View File

@ -22,7 +22,7 @@ You can use these following configuration options on broker.xml to configure how
Name | Description
:--- | :---
--- | ---
critical-analyzer | Enable or disable the critical analysis (default true)
critical-analyzer-timeout | Timeout used to do the critical analysis (default 120000 milliseconds)
critical-analyzer-check-period | Time used to check the response times (default half of critical-analyzer-timeout)
@ -42,7 +42,7 @@ If you have critical-analyzer-policy=HALT
[Artemis Critical Analyzer] 18:10:00,831 ERROR [org.apache.activemq.artemis.core.server] AMQ224079: The process for the virtual machine will be killed, as component org.apache.activemq.artemis.tests.integration.critical.CriticalSimpleTest$2@5af97850 is not responsive
```
While if you have critical-analyzer-policy=SHUTDOWN
While if you have critical-analyzer-policy=`SHUTDOWN`
```
[Artemis Critical Analyzer] 18:07:53,475 ERROR [org.apache.activemq.artemis.core.server] AMQ224080: The server process will now be stopped, as component org.apache.activemq.artemis.tests.integration.critical.CriticalSimpleTest$2@5af97850 is not responsive
@ -85,7 +85,7 @@ AMQ119003: End Thread dump
- The Server will be halted if configured to `HALT`
- The system will be stopped if `SHUTDOWN` is used:
* Notice that if the system is not behaving well, there is no guarantees the stop will work.
- The system will be stopped if `SHUTDOWN` is used. **Notice**: If the system
is not behaving well, there are no guarantees the stop will work.

View File

@ -0,0 +1,348 @@
# Data Tools
You can use the Artemis CLI to execute data maintenance tools:
This is a list of sub-commands available
Name | Description
---|---
exp | Export the message data using a special and independent XML format
imp | Imports the journal to a running broker using the output from exp
data | Prints a report about journal records and summary of existent records, as well a report on paging
encode | shows an internal format of the journal encoded to String
decode | imports the internal journal format from encode
You can use the tool's help for more information on how to execute each of the tools. For example:
```
$ ./artemis help data print
NAME
artemis data print - Print data records information (WARNING: don't use
while a production server is running)
SYNOPSIS
artemis data print [--bindings <binding>] [--broker <brokerConfig>]
[--f] [--jdbc] [--jdbc-bindings-table-name <jdbcBindings>]
[--jdbc-connection-url <jdbcURL>]
[--jdbc-driver-class-name <jdbcClassName>]
[--jdbc-large-message-table-name <jdbcLargeMessages>]
[--jdbc-message-table-name <jdbcMessages>]
[--jdbc-page-store-table-name <jdbcPageStore>] [--journal <journal>]
[--large-messages <largeMessges>] [--output <output>]
[--paging <paging>] [--safe] [--verbose] [--] [<configuration>]
OPTIONS
--bindings <binding>
The folder used for bindings (default from broker.xml)
--broker <brokerConfig>
This would override the broker configuration from the bootstrap
--f
This will allow certain tools like print-data to be performed
ignoring any running servers. WARNING: Changing data concurrently
with a running broker may damage your data. Be careful with this
option.
--jdbc
It will activate jdbc
--jdbc-bindings-table-name <jdbcBindings>
Name of the jdbc bindigns table
--jdbc-connection-url <jdbcURL>
The connection used for the database
--jdbc-driver-class-name <jdbcClassName>
JDBC driver classname
--jdbc-large-message-table-name <jdbcLargeMessages>
Name of the large messages table
--jdbc-message-table-name <jdbcMessages>
Name of the jdbc messages table
--jdbc-page-store-table-name <jdbcPageStore>
Name of the page sotre messages table
--journal <journal>
The folder used for messages journal (default from broker.xml)
--large-messages <largeMessges>
The folder used for large-messages (default from broker.xml)
--output <output>
Output name for the file
--paging <paging>
The folder used for paging (default from broker.xml)
--safe
It will print your data structure without showing your data
--verbose
Adds more information on the execution
--
This option can be used to separate command-line options from the
list of argument, (useful when arguments might be mistaken for
command-line options
<configuration>
Broker Configuration URI, default
'xml:${ARTEMIS_INSTANCE}/etc/bootstrap.xml'
```
For a full list of data tools commands available use:
```
$ ./artemis help data
NAME
artemis data - data tools group (print|imp|exp|encode|decode|compact)
(example ./artemis data print)
SYNOPSIS
artemis data
artemis data compact [--verbose] [--paging <paging>]
[--journal <journal>] [--large-messages <largeMessges>]
[--broker <brokerConfig>] [--bindings <binding>]
artemis data decode [--verbose] [--suffix <suffix>] [--paging <paging>]
[--prefix <prefix>] [--file-size <size>] --input <input>
[--journal <journal>] [--directory <directory>]
[--large-messages <largeMessges>] [--broker <brokerConfig>]
[--bindings <binding>]
artemis data encode [--verbose] [--directory <directory>]
[--suffix <suffix>] [--paging <paging>] [--prefix <prefix>]
[--file-size <size>] [--journal <journal>]
[--large-messages <largeMessges>] [--broker <brokerConfig>]
[--bindings <binding>]
artemis data exp [--jdbc-bindings-table-name <jdbcBindings>]
[--jdbc-message-table-name <jdbcMessages>] [--paging <paging>]
[--jdbc-connection-url <jdbcURL>]
[--jdbc-large-message-table-name <jdbcLargeMessages>] [--f]
[--large-messages <largeMessges>] [--broker <brokerConfig>]
[--jdbc-page-store-table-name <jdbcPageStore>]
[--jdbc-driver-class-name <jdbcClassName>] [--jdbc] [--verbose]
[--journal <journal>] [--output <output>] [--bindings <binding>]
artemis data imp [--user <user>] [--legacy-prefixes] [--verbose]
[--host <host>] [--port <port>] [--transaction] --input <input>
[--password <password>] [--sort]
artemis data print [--jdbc-bindings-table-name <jdbcBindings>]
[--jdbc-message-table-name <jdbcMessages>] [--paging <paging>]
[--jdbc-connection-url <jdbcURL>]
[--jdbc-large-message-table-name <jdbcLargeMessages>] [--f]
[--large-messages <largeMessges>] [--broker <brokerConfig>]
[--jdbc-page-store-table-name <jdbcPageStore>]
[--jdbc-driver-class-name <jdbcClassName>] [--safe] [--jdbc] [--verbose]
[--journal <journal>] [--output <output>] [--bindings <binding>]
COMMANDS
With no arguments, Display help information
print
Print data records information (WARNING: don't use while a
production server is running)
With --jdbc-bindings-table-name option, Name of the jdbc bindigns
table
With --jdbc-message-table-name option, Name of the jdbc messages
table
With --paging option, The folder used for paging (default from
broker.xml)
With --jdbc-connection-url option, The connection used for the
database
With --jdbc-large-message-table-name option, Name of the large
messages table
With --f option, This will allow certain tools like print-data to be
performed ignoring any running servers. WARNING: Changing data
concurrently with a running broker may damage your data. Be careful
with this option.
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --jdbc-page-store-table-name option, Name of the page sotre
messages table
With --jdbc-driver-class-name option, JDBC driver classname
With --safe option, It will print your data structure without
showing your data
With --jdbc option, It will activate jdbc
With --verbose option, Adds more information on the execution
With --journal option, The folder used for messages journal (default
from broker.xml)
With --output option, Output name for the file
With --bindings option, The folder used for bindings (default from
broker.xml)
exp
Export all message-data using an XML that could be interpreted by
any system.
With --jdbc-bindings-table-name option, Name of the jdbc bindigns
table
With --jdbc-message-table-name option, Name of the jdbc messages
table
With --paging option, The folder used for paging (default from
broker.xml)
With --jdbc-connection-url option, The connection used for the
database
With --jdbc-large-message-table-name option, Name of the large
messages table
With --f option, This will allow certain tools like print-data to be
performed ignoring any running servers. WARNING: Changing data
concurrently with a running broker may damage your data. Be careful
with this option.
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --jdbc-page-store-table-name option, Name of the page sotre
messages table
With --jdbc-driver-class-name option, JDBC driver classname
With --jdbc option, It will activate jdbc
With --verbose option, Adds more information on the execution
With --journal option, The folder used for messages journal (default
from broker.xml)
With --output option, Output name for the file
With --bindings option, The folder used for bindings (default from
broker.xml)
imp
Import all message-data using an XML that could be interpreted by
any system.
With --user option, User name used to import the data. (default
null)
With --legacy-prefixes option, Do not remove prefixes from legacy
imports
With --verbose option, Adds more information on the execution
With --host option, The host used to import the data (default
localhost)
With --port option, The port used to import the data (default 61616)
With --transaction option, If this is set to true you will need a
whole transaction to commit at the end. (default false)
With --input option, The input file name (default=exp.dmp)
With --password option, User name used to import the data. (default
null)
With --sort option, Sort the messages from the input (used for older
versions that won't sort messages)
decode
Decode a journal's internal format into a new journal set of files
With --verbose option, Adds more information on the execution
With --suffix option, The journal suffix (default amq)
With --paging option, The folder used for paging (default from
broker.xml)
With --prefix option, The journal prefix (default activemq-data)
With --file-size option, The journal size (default 10485760)
With --input option, The input file name (default=exp.dmp)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --directory option, The journal folder (default journal folder
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --bindings option, The folder used for bindings (default from
broker.xml)
encode
Encode a set of journal files into an internal encoded data format
With --verbose option, Adds more information on the execution
With --directory option, The journal folder (default the journal
folder from broker.xml)
With --suffix option, The journal suffix (default amq)
With --paging option, The folder used for paging (default from
broker.xml)
With --prefix option, The journal prefix (default activemq-data)
With --file-size option, The journal size (default 10485760)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --bindings option, The folder used for bindings (default from
broker.xml)
compact
Compacts the journal of a non running server
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --bindings option, The folder used for bindings (default from
broker.xml)
```

View File

@ -117,9 +117,9 @@ non-exclusive divert, again from the divert example:
```xml
<divert name="order-divert">
<address>orders</address>
<forwarding-address>spyTopic</forwarding-address>
<exclusive>false</exclusive>
<address>orders</address>
<forwarding-address>spyTopic</forwarding-address>
<exclusive>false</exclusive>
</divert>
```

View File

@ -48,7 +48,7 @@ already received a message with that value of the header. If it has
received a message with the same value before then it will ignore the
message.
> **Note**
> **Note:**
>
> Using duplicate detection to move messages between nodes can give you
> the same *once and only once* delivery guarantees as if you were using
@ -118,7 +118,7 @@ configured by the parameter `persist-id-cache`, also in
be persisted to permanent storage as they are received. The default
value for this parameter is `true`.
> **Note**
> **Note:**
>
> When choosing a size of the duplicate id cache be sure to set it to a
> larger enough size so if you resend messages all the previously sent

View File

@ -1,29 +1,30 @@
# Embedding Apache ActiveMQ Artemis
Apache ActiveMQ Artemis is designed as set of simple Plain Old Java Objects (POJOs).
This means Apache ActiveMQ Artemis can be instantiated and run in any dependency
injection framework such as Spring or Google Guice. It also means that if you have an application that could use
messaging functionality internally, then it can *directly instantiate*
Apache ActiveMQ Artemis clients and servers in its own application code to perform that
functionality. We call this *embedding* Apache ActiveMQ Artemis.
Apache ActiveMQ Artemis is designed as set of simple Plain Old Java Objects
(POJOs). This means Apache ActiveMQ Artemis can be instantiated and run in any
dependency injection framework such as Spring or Google Guice. It also means
that if you have an application that could use messaging functionality
internally, then it can *directly instantiate* Apache ActiveMQ Artemis clients
and servers in its own application code to perform that functionality. We call
this *embedding* Apache ActiveMQ Artemis.
Examples of applications that might want to do this include any
application that needs very high performance, transactional, persistent
messaging but doesn't want the hassle of writing it all from scratch.
Examples of applications that might want to do this include any application
that needs very high performance, transactional, persistent messaging but
doesn't want the hassle of writing it all from scratch.
Embedding Apache ActiveMQ Artemis can be done in very few easy steps. Instantiate the
configuration object, instantiate the server, start it, and you have a
Apache ActiveMQ Artemis running in your virtual machine. It's as simple and easy as
that.
Embedding Apache ActiveMQ Artemis can be done in very few easy steps.
Instantiate the configuration object, instantiate the server, start it, and you
have an Apache ActiveMQ Artemis running in your virtual machine. It's as simple
and easy as that.
## Simple Config File Embedding
The simplest way to embed Apache ActiveMQ Artemis is to use the embedded wrapper
classes and configure Apache ActiveMQ Artemis through its configuration files. There
are two different helper classes for this depending on whether your
using the Apache ActiveMQ Artemis Core API or JMS.
The simplest way to embed Apache ActiveMQ Artemis is to use the embedded
wrapper classes and configure Apache ActiveMQ Artemis through its configuration
files. There are two different helper classes for this depending on whether
you're using the Apache ActiveMQ Artemis Core API or JMS.
## Embeddeing Apache ActiveMQ Artemis Server
## Embedding an Apache ActiveMQ Artemis Broker
For instantiating a core Apache ActiveMQ Artemis Server, the steps are pretty
simple. The example requires that you have defined a configuration file
@ -38,9 +39,9 @@ EmbeddedActiveMQ embedded = new EmbeddedActiveMQ();
embedded.start();
ClientSessionFactory nettyFactory = ActiveMQClient.createClientSessionFactory(
new TransportConfiguration(
InVMConnectorFactory.class.getName()));
ServerLocator serverLocator = ActiveMQClient.createServerLocator("vm://0");
ClientSessionFactory factory = serverLocator.createSessionFactory();
ClientSession session = factory.createSession();
@ -65,23 +66,22 @@ System.out.println("message = " + msgReceived.getBody().readString());
session.close();
```
The `EmbeddedActiveMQ` class has a few additional setter methods that
allow you to specify a different config file name as well as other
properties. See the javadocs for this class for more details.
The `EmbeddedActiveMQ` class has a few additional setter methods that allow you
to specify a different config file name as well as other properties. See the
javadocs for this class for more details.
## POJO instantiation - Embedding Programmatically
You can follow this step-by-step guide to programmatically embed the
core, non-JMS Apache ActiveMQ Artemis Server instance:
You can follow this step-by-step guide to programmatically embed the core,
non-JMS Apache ActiveMQ Artemis Server instance:
Create the configuration object - this contains configuration
information for an Apache ActiveMQ Artemis instance. The setter methods of this class
allow you to programmatically set configuration options as describe in
the [Server Configuration](configuration-index.md) section.
Create the configuration object - this contains configuration information for
an Apache ActiveMQ Artemis instance. The setter methods of this class allow you
to programmatically set configuration options as described in the [Server
Configuration](configuration-index.md) section.
The acceptors are configured through `ConfigurationImpl`. Just add the
`NettyAcceptorFactory` on the transports the same way you would through
the main configuration file.
The acceptors are configured through `Configuration`. Just add the acceptor URL
the same way you would through the main configuration file.
```java
import org.apache.activemq.artemis.core.config.Configuration;
@ -90,12 +90,9 @@ import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;
...
Configuration config = new ConfigurationImpl();
HashSet<TransportConfiguration> transports = new HashSet<TransportConfiguration>();
transports.add(new TransportConfiguration(NettyAcceptorFactory.class.getName()));
transports.add(new TransportConfiguration(InVMAcceptorFactory.class.getName()));
config.setAcceptorConfigurations(transports);
config.addAcceptorConfiguration("in-vm", "vm://0");
config.addAcceptorConfiguration("tcp", "tcp://127.0.0.1:61616");
```
You need to instantiate an instance of
@ -123,8 +120,9 @@ server.start();
## Dependency Frameworks
You may also choose to use a dependency injection framework such as
The Spring Framework. See [Spring Integration](spring-integration.md) for more details on
You may also choose to use a dependency injection framework such as The Spring
Framework. See [Spring Integration](spring-integration.md) for more details on
Spring and Apache ActiveMQ Artemis.
Apache ActiveMQ Artemis standalone uses [Airline](https://github.com/airlift/airline) to bootstrap.
Apache ActiveMQ Artemis standalone uses
[Airline](https://github.com/airlift/airline) to bootstrap.

File diff suppressed because it is too large Load Diff

View File

@ -3,44 +3,43 @@
Exclusive queues are special queues which route all messages to only one
consumer at a time.
This is useful when you want all messages to be processed serially by the same consumer,
when a producer does not specify [Message Grouping](message-grouping.md).
This is useful when you want all messages to be processed serially by the same
consumer, when a producer does not specify [Message Grouping](message-grouping.md).
An example might be orders sent to an address and you need to consume them
in the exact same order they were produced.
Obviously exclusive queues have a draw back that you cannot scale out the consumers to
improve consumption as only one consumer would technically be active.
Obviously exclusive queues have a drawback that you cannot scale out the
consumers to improve consumption as only one consumer would technically be active.
Here we advise that you look at message groups first.
## Configuring Exclusive Queues
Exclusive queues can be statically configured using the `exclusive` boolean property:
Exclusive queues can be statically configured using the `exclusive` boolean
property:
```xml
<configuration ...>
<core ...>
...
<address name="foo.bar">
<multicast>
<queue name="orders1" exclusive="true"/>
</multicast>
</address>
</core>
</configuration>
<address name="foo.bar">
<multicast>
<queue name="orders1" exclusive="true"/>
</multicast>
</address>
```
Specified on creating a Queue by using the CORE api specifying the parameter `exclusive` to `true`.
Specified on creating a Queue by using the CORE api specifying the parameter
`exclusive` to `true`.
Or on auto-create when using the JMS Client by using address parameters when creating the destination used by the consumer.
Or on auto-create when using the JMS Client by using address parameters when
creating the destination used by the consumer.
```java
Queue queue = session.createQueue("my.destination.name?exclusive=true");
Topic topic = session.createTopic("my.destination.name?exclusive=true");
```
Also the default for all queues under and address can be defaulted using the address-setting configuration:
Also the default for all queues under an address can be defaulted using the
`address-setting` configuration:
```xml
<address-setting match="lastValueQueue">
@ -48,12 +47,12 @@ Also the default for all queues under and address can be defaulted using the add
</address-setting>
```
By default, `default-exclusive-queue` is `false`. Address wildcards can be used
to configure exclusive queues for a set of addresses (see [here](wildcard-syntax.md)).
By default, `default-exclusive-queue` is `false`. Address
[wildcards](wildcard-syntax.md) can be used to configure exclusive queues for a
set of addresses.
## Example
See `Exclusive Queue` in [examples](examples.md).
For additional examples see `org.apache.activemq.artemis.tests.integration.jms.client.ExclusiveTest`
See the [exclusive queue example](examples.md#exclusive) which shows how
exclusive queues are configured and used with JMS.

View File

@ -10,19 +10,19 @@ please the JMS javadoc for
Filter expressions are used in several places in Apache ActiveMQ Artemis
- Predefined Queues. When pre-defining a queue, in
`broker.xml` in either the core or jms configuration a filter
expression can be defined for a queue. Only messages that match the
filter expression will enter the queue.
- Predefined Queues. When pre-defining a queue, in
`broker.xml` in either the core or jms configuration a filter
expression can be defined for a queue. Only messages that match the
filter expression will enter the queue.
- Core bridges can be defined with an optional filter expression, only
matching messages will be bridged (see [Core Bridges](core-bridges.md)).
- Core bridges can be defined with an optional filter expression, only
matching messages will be bridged (see [Core Bridges](core-bridges.md)).
- Diverts can be defined with an optional filter expression, only
matching messages will be diverted (see [Diverts](diverts.md)).
- Diverts can be defined with an optional filter expression, only
matching messages will be diverted (see [Diverts](diverts.md)).
- Filter are also used programmatically when creating consumers,
queues and in several places as described in [management](management.md).
- Filter are also used programmatically when creating consumers,
queues and in several places as described in [management](management.md).
There are some differences between JMS selector expressions and Apache ActiveMQ Artemis
core filter expressions. Whereas JMS selector expressions operate on a
@ -31,21 +31,21 @@ JMS message, Apache ActiveMQ Artemis core filter expressions operate on a core m
The following identifiers can be used in a core filter expressions to
refer to attributes of the core message in an expression:
- `AMQPriority`. To refer to the priority of a message. Message
priorities are integers with valid values from `0 - 9`. `0` is the
lowest priority and `9` is the highest. E.g.
`AMQPriority = 3 AND animal = 'aardvark'`
- `AMQPriority`. To refer to the priority of a message. Message
priorities are integers with valid values from `0 - 9`. `0` is the
lowest priority and `9` is the highest. E.g.
`AMQPriority = 3 AND animal = 'aardvark'`
- `AMQExpiration`. To refer to the expiration time of a message. The
value is a long integer.
- `AMQExpiration`. To refer to the expiration time of a message. The
value is a long integer.
- `AMQDurable`. To refer to whether a message is durable or not. The
value is a string with valid values: `DURABLE` or `NON_DURABLE`.
- `AMQDurable`. To refer to whether a message is durable or not. The
value is a string with valid values: `DURABLE` or `NON_DURABLE`.
- `AMQTimestamp`. The timestamp of when the message was created. The
value is a long integer.
- `AMQTimestamp`. The timestamp of when the message was created. The
value is a long integer.
- `AMQSize`. The size of a message in bytes. The value is an integer.
- `AMQSize`. The size of a message in bytes. The value is an integer.
Any other identifiers used in core filter expressions will be assumed to
be properties of the message.

View File

@ -37,11 +37,11 @@ bytes).
The value can be:
- `-1` for an *unbounded* buffer
- `-1` for an *unbounded* buffer
- `0` to not buffer any messages.
- `0` to not buffer any messages.
- `>0` for a buffer with the given maximum size in bytes.
- `>0` for a buffer with the given maximum size in bytes.
Setting the consumer window size can considerably improve performance
depending on the messaging use case. As an example, let's consider the
@ -106,7 +106,7 @@ control. The default value is `-1`.
Please see [the examples chapter](examples.md) for a working example of
limiting consumer rate.
> **Note**
> **Note:**
>
> Rate limited flow control can be used in conjunction with window based
> flow control. Rate limited flow control only effects how many messages
@ -198,7 +198,7 @@ to prevent that max size being exceeded.
Note the policy must be set to `BLOCK` to enable blocking producer flow
control.
> **Note**
> **Note:**
>
> Note that in the default configuration all addresses are set to block
> producers after 10 MiB of message data is in the address. This means
@ -207,7 +207,7 @@ control.
> want this behaviour increase the `max-size-bytes` parameter or change
> the address full message policy.
> **Note**
> **Note:**
>
> Producer credits are allocated from the broker to the client. Flow control
> credit checking (i.e. checking a producer has enough credit) is done on the

View File

@ -1,21 +1,19 @@
# Graceful Server Shutdown
In certain circumstances an administrator might not want to disconnect
all clients immediately when stopping the broker. In this situation the
broker can be configured to shutdown *gracefully* using the
`graceful-shutdown-enabled` boolean configuration parameter.
In certain circumstances an administrator might not want to disconnect all
clients immediately when stopping the broker. In this situation the broker can
be configured to shutdown *gracefully* using the `graceful-shutdown-enabled`
boolean configuration parameter.
When the `graceful-shutdown-enabled` configuration parameter is `true`
and the broker is shutdown it will first prevent any additional clients
from connecting and then it will wait for any existing connections to
be terminated by the client before completing the shutdown process. The
default value is `false`.
When the `graceful-shutdown-enabled` configuration parameter is `true` and the
broker is shutdown it will first prevent any additional clients from connecting
and then it will wait for any existing connections to be terminated by the
client before completing the shutdown process. The default value is `false`.
Of course, it's possible a client could keep a connection to the broker
indefinitely effectively preventing the broker from shutting down
gracefully. To deal with this of situation the
`graceful-shutdown-timeout` configuration parameter is available. This
tells the broker (in milliseconds) how long to wait for all clients to
disconnect before forcefully disconnecting the clients and proceeding
with the shutdown process. The default value is `-1` which means the
broker will wait indefinitely for clients to disconnect.
indefinitely effectively preventing the broker from shutting down gracefully.
To deal with this of situation the `graceful-shutdown-timeout` configuration
parameter is available. This tells the broker (in milliseconds) how long to
wait for all clients to disconnect before forcefully disconnecting the clients
and proceeding with the shutdown process. The default value is `-1` which means
the broker will wait indefinitely for clients to disconnect.

View File

@ -52,14 +52,14 @@ This of course means there will be no Backup Strategy and is the default
if none is provided, however this is used to configure `scale-down`
which we will cover in a later chapter.
> **Note**
> **Note:**
>
> The `ha-policy` configurations replaces any current HA configuration
> in the root of the `broker.xml` configuration. All old
> configuration is now deprecated although best efforts will be made to
> honour it if configured this way.
> **Note**
> **Note:**
>
> Only persistent message data will survive failover. Any non persistent
> message data will not be available after failover.
@ -115,7 +115,7 @@ synchronizing the data with its live server. The time it will take for
this to happen will depend on the amount of data to be synchronized and
the connection speed.
> **Note**
> **Note:**
>
> In general, synchronization occurs in parallel with current network traffic so
> this won't cause any blocking on current clients. However, there is a critical
@ -137,37 +137,37 @@ Cluster Connection also defines how backup servers will find the remote
live servers to pair with. Refer to [Clusters](clusters.md) for details on how this is done,
and how to configure a cluster connection. Notice that:
- Both live and backup servers must be part of the same cluster.
Notice that even a simple live/backup replicating pair will require
a cluster configuration.
- Both live and backup servers must be part of the same cluster.
Notice that even a simple live/backup replicating pair will require
a cluster configuration.
- Their cluster user and password must match.
- Their cluster user and password must match.
Within a cluster, there are two ways that a backup server will locate a
live server to replicate from, these are:
- `specifying a node group`. You can specify a group of live servers
that a backup server can connect to. This is done by configuring
`group-name` in either the `master` or the `slave` element of the
`broker.xml`. A Backup server will only connect to a
live server that shares the same node group name
- `specifying a node group`. You can specify a group of live servers
that a backup server can connect to. This is done by configuring
`group-name` in either the `master` or the `slave` element of the
`broker.xml`. A Backup server will only connect to a
live server that shares the same node group name
- `connecting to any live`. This will be the behaviour if `group-name`
is not configured allowing a backup server to connect to any live
server
- `connecting to any live`. This will be the behaviour if `group-name`
is not configured allowing a backup server to connect to any live
server
> **Note**
> **Note:**
>
> A `group-name` example: suppose you have 5 live servers and 6 backup
> servers:
>
> - `live1`, `live2`, `live3`: with `group-name=fish`
> - `live1`, `live2`, `live3`: with `group-name=fish`
>
> - `live4`, `live5`: with `group-name=bird`
> - `live4`, `live5`: with `group-name=bird`
>
> - `backup1`, `backup2`, `backup3`, `backup4`: with `group-name=fish`
> - `backup1`, `backup2`, `backup3`, `backup4`: with `group-name=fish`
>
> - `backup5`, `backup6`: with `group-name=bird`
> - `backup5`, `backup6`: with `group-name=bird`
>
> After joining the cluster the backups with `group-name=fish` will
> search for live servers with `group-name=fish` to pair with. Since
@ -183,7 +183,7 @@ until it finds a live server that has no current backup configured. If
no live server is available it will wait until the cluster topology
changes and repeats the process.
> **Note**
> **Note:**
>
> This is an important distinction from a shared-store backup, if a
> backup starts and does not find a live server, the server will just
@ -240,101 +240,44 @@ The backup server must be similarly configured but as a `slave`
The following table lists all the `ha-policy` configuration elements for
HA strategy Replication for `master`:
<table summary="HA Replication Master Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>`check-for-live-server`</td>
<td>Whether to check the cluster for a (live) server using our own server ID
when starting up. This option is only necessary for performing 'fail-back'
on replicating servers.</td>
</tr>
<tr>
<td>`cluster-name`</td>
<td>Name of the cluster configuration to use for replication. This setting is
only necessary if you configure multiple cluster connections. If configured then
the connector configuration of the cluster configuration with this name will be
used when connecting to the cluster to discover if a live server is already running,
see `check-for-live-server`. If unset then the default cluster connections configuration
is used (the first one configured).</td>
</tr>
<tr>
<td>`group-name`</td>
<td>If set, backup servers will only pair with live servers with matching group-name.</td>
</tr>
<tr>
<td>`initial-replication-sync-timeout`</td>
<td>The amount of time the replicating server will wait at the completion of the initial
replication process for the replica to acknowledge it has received all the necessary
data. The default is 30,000 milliseconds. <strong>Note</strong>: during this interval any
journal related operations will be blocked.</td>
</tr>
</tbody>
</table>
- `check-for-live-server`
Whether to check the cluster for a (live) server using our own server ID when starting up. This option is only necessary for performing 'fail-back' on replicating servers.
- `cluster-name`
Name of the cluster configuration to use for replication. This setting is only necessary if you configure multiple cluster connections. If configured then the connector configuration of the cluster configuration with this name will be used when connecting to the cluster to discover if a live server is already running, see `check-for-live-server`. If unset then the default cluster connections configuration is used (the first one configured).
- `group-name`
If set, backup servers will only pair with live servers with matching group-name.
- `initial-replication-sync-timeout`
The amount of time the replicating server will wait at the completion of the initial replication process for the replica to acknowledge it has received all the necessary data. The default is 30,000 milliseconds. **Note:** during this interval any journal related operations will be blocked.
The following table lists all the `ha-policy` configuration elements for
HA strategy Replication for `slave`:
<table summary="HA Replication Slave Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>`cluster-name`</td>
<td>Name of the cluster configuration to use for replication.
This setting is only necessary if you configure multiple cluster
connections. If configured then the connector configuration of
the cluster configuration with this name will be used when
connecting to the cluster to discover if a live server is already
running, see `check-for-live-server`. If unset then the default
cluster connections configuration is used (the first one configured)</td>
</tr>
<tr>
<td>`group-name`</td>
<td>If set, backup servers will only pair with live servers with matching group-name</td>
</tr>
<tr>
<td>`max-saved-replicated-journals-size`</td>
<td>This specifies how many times a replicated backup server
can restart after moving its files on start. Once there are
this number of backup journal files the server will stop permanently
after if fails back.</td>
</tr>
<tr>
<td>`allow-failback`</td>
<td>Whether a server will automatically stop when a another places a
request to take over its place. The use case is when the backup has
failed over</td>
</tr>
<tr>
<td>`initial-replication-sync-timeout`</td>
<td>After failover and the slave has become live, this is
set on the new live server. It represents the amount of time
the replicating server will wait at the completion of the
initial replication process for the replica to acknowledge
it has received all the necessary data. The default is
30,000 milliseconds. <strong>Note</strong>: during this interval any
journal related operations will be blocked.</td>
</tr>
</tbody>
</table>
- `cluster-name`
Name of the cluster configuration to use for replication. This setting is only necessary if you configure multiple cluster connections. If configured then the connector configuration of the cluster configuration with this name will be used when connecting to the cluster to discover if a live server is already running, see `check-for-live-server`. If unset then the default cluster connections configuration is used (the first one configured)
- `group-name`
If set, backup servers will only pair with live servers with matching group-name
- `max-saved-replicated-journals-size`
This specifies how many times a replicated backup server can restart after moving its files on start. Once there are this number of backup journal files the server will stop permanently after if fails back.
- `allow-failback`
Whether a server will automatically stop when a another places a request to take over its place. The use case is when the backup has failed over
- `initial-replication-sync-timeout`
After failover and the slave has become live, this is set on the new live server. It represents the amount of time the replicating server will wait at the completion of the initial replication process for the replica to acknowledge it has received all the necessary data. The default is 30,000 milliseconds. **Note:** during this interval any journal related operations will be blocked.
### Shared Store
@ -402,7 +345,7 @@ In order for live - backup groups to operate properly with a shared
store, both servers must have configured the location of journal
directory to point to the *same shared location* (as explained in [Configuring the message journal](persistence.md))
> **Note**
> **Note:**
>
> todo write something about GFS
@ -504,67 +447,24 @@ automatically by setting the following property in the
The following table lists all the `ha-policy` configuration elements for
HA strategy shared store for `master`:
<table summary="HA Shared Store Master Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>`failover-on-server-shutdown`</td>
<td>If set to true then when this server is stopped
normally the backup will become live assuming failover.
If false then the backup server will remain passive.
Note that if false you want failover to occur the you
can use the the management API as explained at [Management](management.md)</td>
</tr>
<tr>
<td>`wait-for-activation`</td>
<td>If set to true then server startup will wait until it is activated.
If set to false then server startup will be done in the background.
Default is true.</td>
</tr>
</tbody>
</table>
- `failover-on-server-shutdown`
If set to true then when this server is stopped normally the backup will become live assuming failover. If false then the backup server will remain passive. Note that if false you want failover to occur the you can use the the management API as explained at [Management](management.md).
- `wait-for-activation`
If set to true then server startup will wait until it is activated. If set to false then server startup will be done in the background. Default is true.
The following table lists all the `ha-policy` configuration elements for
HA strategy Shared Store for `slave`:
<table summary="HA Shared Store Slave Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>`failover-on-server-shutdown`</td>
<td>In the case of a backup that has become live. then
when set to true then when this server is stopped normally
the backup will become liveassuming failover. If false then
the backup server will remain passive. Note that if false
you want failover to occur the you can use the the management
API as explained at [Management](management.md)</td>
</tr>
<tr>
<td>`allow-failback`</td>
<td>Whether a server will automatically stop when a another
places a request to take over its place. The use case is
when the backup has failed over.</td>
</tr>
</tbody>
</table>
- `failover-on-server-shutdown`
In the case of a backup that has become live. then when set to true then when this server is stopped normally the backup will become liveassuming failover. If false then the backup server will remain passive. Note that if false you want failover to occur the you can use the the management API as explained at [Management](management.md).
- `allow-failback`
Whether a server will automatically stop when a another places a request to take over its place. The use case is when the backup has failed over.
#### Colocated Backup Servers
@ -613,7 +513,7 @@ say 100 (which is the default) and a connector is using port 61616 then
this will be set to 5545 for the first server created, 5645 for the
second and so on.
> **Note**
> **Note:**
>
> for INVM connectors and Acceptors the id will have
> `colocated_backup_n` appended, where n is the backup server number.
@ -648,40 +548,25 @@ creating server but have the new backups name appended.
The following table lists all the `ha-policy` configuration elements for colocated policy:
<table summary="HA Replication Colocation Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>`request-backup`</td>
<td>If true then the server will request a backup on another node</td>
</tr>
<tr>
<td>`backup-request-retries`</td>
<td>How many times the live server will try to request a backup, -1 means for ever.</td>
</tr>
<tr>
<td>`backup-request-retry-interval`</td>
<td>How long to wait for retries between attempts to request a backup server.</td>
</tr>
<tr>
<td>`max-backups`</td>
<td>How many backups a live server can create</td>
</tr>
<tr>
<td>`backup-port-offset`</td>
<td>The offset to use for the Connectors and Acceptors when creating a new backup server.</td>
</tr>
</tbody>
</table>
- `request-backup`
If true then the server will request a backup on another node
- `backup-request-retries`
How many times the live server will try to request a backup, -1 means for ever.
- `backup-request-retry-interval`
How long to wait for retries between attempts to request a backup server.
- `max-backups`
How many backups a live server can create
- `backup-port-offset`
The offset to use for the Connectors and Acceptors when creating a new backup server.
### Scaling Down
@ -814,9 +699,9 @@ be high enough to deal with the time needed to scale down.
Apache ActiveMQ Artemis defines two types of client failover:
- Automatic client failover
- Automatic client failover
- Application-level client failover
- Application-level client failover
Apache ActiveMQ Artemis also provides 100% transparent automatic reattachment of
connections to the same server (e.g. in case of transient network
@ -970,7 +855,7 @@ response will come back. In this case it is not easy for the client to
determine whether the transaction commit was actually processed on the
live server before failure occurred.
> **Note**
> **Note:**
>
> If XA is being used either via JMS or through the core API then an
> `XAException.XA_RETRY` is thrown. This is to inform Transaction
@ -988,7 +873,7 @@ retried, duplicate detection will ensure that any durable messages
resent in the transaction will be ignored on the server to prevent them
getting sent more than once.
> **Note**
> **Note:**
>
> By catching the rollback exceptions and retrying, catching unblocked
> calls and enabling duplicate detection, once and only once delivery
@ -1025,28 +910,13 @@ following:
JMSException error codes
<table summary="HA Replication Colocation Policy" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Error code</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>FAILOVER</td>
<td>Failover has occurred and we have successfully reattached or reconnected.</td>
</tr>
<tr>
<td>DISCONNECT</td>
<td>No failover has occurred and we are disconnected.</td>
</tr>
</tbody>
</table>
- `FAILOVER`
Failover has occurred and we have successfully reattached or reconnected.
- `DISCONNECT`
No failover has occurred and we are disconnected.
### Application-Level Failover
@ -1063,7 +933,7 @@ connection failure is detected. In your `ExceptionListener`, you would
close your old JMS connections, potentially look up new connection
factory instances from JNDI and creating new connections.
For a working example of application-level failover, please see [the examples](examples.md) chapter.
For a working example of application-level failover, please see [the Application-Layer Failover Example](examples.md#application-layer-failover).
If you are using the core API, then the procedure is very similar: you
would set a `FailureListener` on the core `ClientSession` instances.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 61 KiB

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 15 KiB

View File

@ -1,17 +1,18 @@
# Intercepting Operations
Apache ActiveMQ Artemis supports *interceptors* to intercept packets entering and
exiting the server. Incoming and outgoing interceptors are be called for
any packet entering or exiting the server respectively. This allows
custom code to be executed, e.g. for auditing packets, filtering or
other reasons. Interceptors can change the packets they intercept. This
makes interceptors powerful, but also potentially dangerous.
Apache ActiveMQ Artemis supports *interceptors* to intercept packets entering
and exiting the server. Incoming and outgoing interceptors are be called for
any packet entering or exiting the server respectively. This allows custom code
to be executed, e.g. for auditing packets, filtering or other reasons.
Interceptors can change the packets they intercept. This makes interceptors
powerful, but also potentially dangerous.
## Implementing The Interceptors
All interceptors are protocol specific.
An interceptor for the core protocol must implement the interface `Interceptor`:
An interceptor for the core protocol must implement the interface
`Interceptor`:
```java
package org.apache.activemq.artemis.api.core.interceptor;
@ -33,10 +34,10 @@ public interface StompFrameInterceptor extends BaseInterceptor<StompFrame>
}
```
Likewise for MQTT protocol, an interceptor must implement the interface `MQTTInterceptor`:
Likewise for MQTT protocol, an interceptor must implement the interface
`MQTTInterceptor`:
```java
package org.apache.activemq.artemis.core.protocol.mqtt;
```java package org.apache.activemq.artemis.core.protocol.mqtt;
public interface MQTTInterceptor extends BaseInterceptor<MqttMessage>
{
@ -46,16 +47,14 @@ public interface MQTTInterceptor extends BaseInterceptor<MqttMessage>
The returned boolean value is important:
- if `true` is returned, the process continues normally
- if `true` is returned, the process continues normally
- if `false` is returned, the process is aborted, no other
interceptors will be called and the packet will not be processed
further by the server.
- if `false` is returned, the process is aborted, no other interceptors will be
called and the packet will not be processed further by the server.
## Configuring The Interceptors
Both incoming and outgoing interceptors are configured in
`broker.xml`:
Both incoming and outgoing interceptors are configured in `broker.xml`:
```xml
<remoting-incoming-interceptors>
@ -69,39 +68,41 @@ Both incoming and outgoing interceptors are configured in
</remoting-outgoing-interceptors>
```
See the documentation on [adding runtime dependencies](using-server.md) to
See the documentation on [adding runtime dependencies](using-server.md) to
understand how to make your interceptor available to the broker.
## Interceptors on the Client Side
The interceptors can also be run on the Apache ActiveMQ Artemit client side to intercept packets
either sent by the client to the server or by the server to the client.
This is done by adding the interceptor to the `ServerLocator` with the
`addIncomingInterceptor(Interceptor)` or
The interceptors can also be run on the Apache ActiveMQ Artemit client side to
intercept packets either sent by the client to the server or by the server to
the client. This is done by adding the interceptor to the `ServerLocator` with
the `addIncomingInterceptor(Interceptor)` or
`addOutgoingInterceptor(Interceptor)` methods.
As noted above, if an interceptor returns `false` then the sending of
the packet is aborted which means that no other interceptors are be
called and the packet is not be processed further by the client.
Typically this process happens transparently to the client (i.e. it has
no idea if a packet was aborted or not). However, in the case of an
outgoing packet that is sent in a `blocking` fashion a
`ActiveMQException` will be thrown to the caller. The exception is
thrown because blocking sends provide reliability and it is considered
an error for them not to succeed. `Blocking` sends occurs when, for
As noted above, if an interceptor returns `false` then the sending of the
packet is aborted which means that no other interceptors are be called and the
packet is not be processed further by the client. Typically this process
happens transparently to the client (i.e. it has no idea if a packet was
aborted or not). However, in the case of an outgoing packet that is sent in a
`blocking` fashion a `ActiveMQException` will be thrown to the caller. The
exception is thrown because blocking sends provide reliability and it is
considered an error for them not to succeed. `Blocking` sends occurs when, for
example, an application invokes `setBlockOnNonDurableSend(true)` or
`setBlockOnDurableSend(true)` on its `ServerLocator` or if an
application is using a JMS connection factory retrieved from JNDI that
has either `block-on-durable-send` or `block-on-non-durable-send` set to
`true`. Blocking is also used for packets dealing with transactions
(e.g. commit, roll-back, etc.). The `ActiveMQException` thrown will
contain the name of the interceptor that returned false.
`setBlockOnDurableSend(true)` on its `ServerLocator` or if an application is
using a JMS connection factory retrieved from JNDI that has either
`block-on-durable-send` or `block-on-non-durable-send` set to `true`. Blocking
is also used for packets dealing with transactions (e.g. commit, roll-back,
etc.). The `ActiveMQException` thrown will contain the name of the interceptor
that returned false.
As on the server, the client interceptor classes (and their
dependencies) must be added to the classpath to be properly instantiated
and invoked.
As on the server, the client interceptor classes (and their dependencies) must
be added to the classpath to be properly instantiated and invoked.
## Example
## Examples
See [the examples chapter](examples.md) for an example which shows how to use interceptors to add
properties to a message on the server.
See the following examples which show how to use interceptors:
- [Interceptor](examples.md#interceptor)
- [Interceptor AMQP](examples.md#interceptor-amqp)
- [Interceptor Client](examples.md#interceptor-client)
- [Interceptor MQTT](examples.md#interceptor-mqtt)

View File

@ -2,237 +2,222 @@
Apache ActiveMQ Artemis includes a fully functional JMS message bridge.
The function of the bridge is to consume messages from a source queue or
topic, and send them to a target queue or topic, typically on a
different server.
The function of the bridge is to consume messages from a source queue or topic,
and send them to a target queue or topic, typically on a different server.
> *Notice:*
> The JMS Bridge is not intended as a replacement for transformation and more expert systems such as Camel.
> The JMS Bridge may be useful for fast transfers as this chapter covers, but keep in mind that more complex scenarios requiring transformations will require you to use a more advanced transformation system that will play on use cases that will go beyond Apache ActiveMQ Artemis.
The source and target servers do not have to be in the same cluster
which makes bridging suitable for reliably sending messages from one
cluster to another, for instance across a WAN, and where the connection
may be unreliable.
A bridge can be deployed as a standalone application, with Apache ActiveMQ Artemis
standalone server or inside a JBoss AS instance. The source and the
target can be located in the same virtual machine or another one.
The bridge can also be used to bridge messages from other non Apache ActiveMQ Artemis
JMS servers, as long as they are JMS 1.1 compliant.
> **Note**
> **Note:**
>
> Do not confuse a JMS bridge with a core bridge. A JMS bridge can be
> used to bridge any two JMS 1.1 compliant JMS providers and uses the
> JMS API. A core bridge (described in [Core Bridges](core-bridges.md)) is used to bridge any two
> Apache ActiveMQ Artemis instances and uses the core API. Always use a core bridge if
> you can in preference to a JMS bridge. The core bridge will typically
> provide better performance than a JMS bridge. Also the core bridge can
> provide *once and only once* delivery guarantees without using XA.
> The JMS Bridge is not intended as a replacement for transformation and more
> expert systems such as Camel. The JMS Bridge may be useful for fast
> transfers as this chapter covers, but keep in mind that more complex
> scenarios requiring transformations will require you to use a more advanced
> transformation system that will play on use cases that will go beyond Apache
> ActiveMQ Artemis.
The bridge has built-in resilience to failure so if the source or target
server connection is lost, e.g. due to network failure, the bridge will
retry connecting to the source and/or target until they come back
online. When it comes back online it will resume operation as normal.
The source and target servers do not have to be in the same cluster which makes
bridging suitable for reliably sending messages from one cluster to another,
for instance across a WAN, and where the connection may be unreliable.
The bridge can be configured with an optional JMS selector, so it will
only consume messages matching that JMS selector
A bridge can be deployed as a standalone application or as a web application
managed by the embedded Jetty instance bootstrapped with Apache ActiveMQ
Artemis. The source and the target can be located in the same virtual machine
or another one.
It can be configured to consume from a queue or a topic. When it
consumes from a topic it can be configured to consume using a non
durable or durable subscription
The bridge can also be used to bridge messages from other non Apache ActiveMQ
Artemis JMS servers, as long as they are JMS 1.1 compliant.
Typically, the bridge is deployed by the JBoss Micro Container via a
beans configuration file. This would typically be deployed inside the
JBoss Application Server and the following example shows an example of a
beans file that bridges 2 destinations which are actually on the same
server.
> **Note:**
>
> Do not confuse a JMS bridge with a core bridge. A JMS bridge can be used to
> bridge any two JMS 1.1 compliant JMS providers and uses the JMS API. A [core
> bridge](core-bridges.md) is used to bridge any two Apache ActiveMQ Artemis
> instances and uses the core API. Always use a core bridge if you can in
> preference to a JMS bridge. The core bridge will typically provide better
> performance than a JMS bridge. Also the core bridge can provide *once and
> only once* delivery guarantees without using XA.
The JMS Bridge is a simple POJO so can be deployed with most frameworks,
simply instantiate the `org.apache.activemq.artemis.api.jms.bridge.impl.JMSBridgeImpl`
The bridge has built-in resilience to failure so if the source or target server
connection is lost, e.g. due to network failure, the bridge will retry
connecting to the source and/or target until they come back online. When it
comes back online it will resume operation as normal.
The bridge can be configured with an optional JMS selector, so it will only
consume messages matching that JMS selector
It can be configured to consume from a queue or a topic. When it consumes from
a topic it can be configured to consume using a non durable or durable
subscription
The JMS Bridge is a simple POJO so can be deployed with most frameworks, simply
instantiate the `org.apache.activemq.artemis.api.jms.bridge.impl.JMSBridgeImpl`
class and set the appropriate parameters.
## JMS Bridge Parameters
The main bean deployed is the `JMSBridge` bean. The bean is configurable
by the parameters passed to its constructor.
The main POJO is the `JMSBridge`. It is is configurable by the parameters
passed to its constructor.
> **Note**
>
> To let a parameter be unspecified (for example, if the authentication
> is anonymous or no message selector is provided), use `<null
> />` for the unspecified parameter value.
- Source Connection Factory Factory
- Source Connection Factory Factory
This injects the `SourceCFF` bean (also defined in the beans file). This
bean is used to create the *source* `ConnectionFactory`
This injects the `SourceCFF` bean (also defined in the beans file).
This bean is used to create the *source* `ConnectionFactory`
- Target Connection Factory Factory
- Target Connection Factory Factory
This injects the `TargetCFF` bean (also defined in the beans file). This
bean is used to create the *target* `ConnectionFactory`
This injects the `TargetCFF` bean (also defined in the beans file).
This bean is used to create the *target* `ConnectionFactory`
- Source Destination Factory Factory
- Source Destination Factory Factory
This injects the `SourceDestinationFactory` bean (also defined in the beans
file). This bean is used to create the *source* `Destination`
This injects the `SourceDestinationFactory` bean (also defined in
the beans file). This bean is used to create the *source*
`Destination`
- Target Destination Factory Factory
- Target Destination Factory Factory
This injects the `TargetDestinationFactory` bean (also defined in the beans
file). This bean is used to create the *target* `Destination`
This injects the `TargetDestinationFactory` bean (also defined in
the beans file). This bean is used to create the *target*
`Destination`
- Source User Name
- Source User Name
this parameter is the username for creating the *source* connection
this parameter is the username for creating the *source* connection
- Source Password
- Source Password
this parameter is the password for creating the *source* connection
this parameter is the password for creating the *source* connection
- Target User Name
- Target User Name
this parameter is the username for creating the *target* connection
this parameter is the username for creating the *target* connection
- Target Password
- Target Password
this parameter is the password for creating the *target* connection
this parameter is the password for creating the *target* connection
- Selector
- Selector
This represents a JMS selector expression used for consuming
messages from the source destination. Only messages that match the
selector expression will be bridged from the source to the target
destination
This represents a JMS selector expression used for consuming
messages from the source destination. Only messages that match the
selector expression will be bridged from the source to the target
destination
The selector expression must follow the [JMS selector
syntax](https://docs.oracle.com/javaee/7/api/javax/jms/Message.html)
The selector expression must follow the [JMS selector
syntax](https://docs.oracle.com/javaee/7/api/javax/jms/Message.html)
- Failure Retry Interval
- Failure Retry Interval
This represents the amount of time in ms to wait between trying to recreate
connections to the source or target servers when the bridge has detected they
have failed
This represents the amount of time in ms to wait between trying to
recreate connections to the source or target servers when the bridge
has detected they have failed
- Max Retries
- Max Retries
This represents the number of times to attempt to recreate connections to the
source or target servers when the bridge has detected they have failed. The
bridge will give up after trying this number of times. `-1` represents 'try
forever'
This represents the number of times to attempt to recreate
connections to the source or target servers when the bridge has
detected they have failed. The bridge will give up after trying this
number of times. `-1` represents 'try forever'
- Quality Of Service
- Quality Of Service
This parameter represents the desired quality of service mode
This parameter represents the desired quality of service mode
Possible values are:
Possible values are:
- `AT_MOST_ONCE`
- `AT_MOST_ONCE`
- `DUPLICATES_OK`
- `DUPLICATES_OK`
- `ONCE_AND_ONLY_ONCE`
- `ONCE_AND_ONLY_ONCE`
See Quality Of Service section for an explanation of these modes.
See Quality Of Service section for an explanation of these modes.
- Max Batch Size
- Max Batch Size
This represents the maximum number of messages to consume from the source
destination before sending them in a batch to the target destination. Its value
must be `>= 1`
This represents the maximum number of messages to consume from the
source destination before sending them in a batch to the target
destination. Its value must be `>= 1`
- Max Batch Time
- Max Batch Time
This represents the maximum number of milliseconds to wait before sending a
batch to target, even if the number of messages consumed has not reached
`MaxBatchSize`. Its value must be `-1` to represent 'wait forever', or `>= 1`
to specify an actual time
This represents the maximum number of milliseconds to wait before
sending a batch to target, even if the number of messages consumed
has not reached `MaxBatchSize`. Its value must be `-1` to represent
'wait forever', or `>= 1` to specify an actual time
- Subscription Name
- Subscription Name
If the source destination represents a topic, and you want to consume from
the topic using a durable subscription then this parameter represents the
durable subscription name
If the source destination represents a topic, and you want to
consume from the topic using a durable subscription then this
parameter represents the durable subscription name
- Client ID
- Client ID
If the source destination represents a topic, and you want to consume from
the topic using a durable subscription then this attribute represents the
JMS client ID to use when creating/looking up the durable subscription
If the source destination represents a topic, and you want to
consume from the topic using a durable subscription then this
attribute represents the JMS client ID to use when
creating/looking up the durable subscription
- Add MessageID In Header
- Add MessageID In Header
If `true`, then the original message's message ID will be appended in the
message sent to the destination in the header `ACTIVEMQ_BRIDGE_MSG_ID_LIST`. If
the message is bridged more than once, each message ID will be appended. This
enables a distributed request-response pattern to be used
If `true`, then the original message's message ID will be appended
in the message sent to the destination in the header
`ACTIVEMQ_BRIDGE_MSG_ID_LIST`. If the message is bridged more than
once, each message ID will be appended. This enables a distributed
request-response pattern to be used
> **Note:**
>
> when you receive the message you can send back a response using the
> correlation id of the first message id, so when the original sender gets it
> back it will be able to correlate it.
> **Note**
>
> when you receive the message you can send back a response using
> the correlation id of the first message id, so when the original
> sender gets it back it will be able to correlate it.
- MBean Server
- MBean Server
To manage the JMS Bridge using JMX, set the MBeanServer where the JMS Bridge
MBean must be registered (e.g. the JVM Platform MBeanServer)
To manage the JMS Bridge using JMX, set the MBeanServer where the
JMS Bridge MBean must be registered (e.g. the JVM Platform
MBeanServer or JBoss AS MBeanServer)
- ObjectName
- ObjectName
If you set the MBeanServer, you also need to set the ObjectName used
to register the JMS Bridge MBean (must be unique)
If you set the MBeanServer, you also need to set the ObjectName used to
register the JMS Bridge MBean (must be unique)
The "transactionManager" property points to a JTA transaction manager
implementation and should be set if you need to use the 'ONCE_AND_ONLY_ONCE'
Quality of Service. Apache ActiveMQ Artemis doesn't ship with such an implementation, but
if you are running within an Application Server you can inject the Transaction
Manager that is shipped.
Quality of Service. Apache ActiveMQ Artemis doesn't ship with such an
implementation, but if you are running within an Application Server you can
inject the Transaction Manager that is shipped.
## Source and Target Connection Factories
The source and target connection factory factories are used to create
the connection factory used to create the connection for the source or
target server.
The source and target connection factory factories are used to create the
connection factory used to create the connection for the source or target
server.
The configuration example above uses the default implementation provided
by Apache ActiveMQ Artemis that looks up the connection factory using JNDI. For other
Application Servers or JMS providers a new implementation may have to be
The configuration example above uses the default implementation provided by
Apache ActiveMQ Artemis that looks up the connection factory using JNDI. For
other Application Servers or JMS providers a new implementation may have to be
provided. This can easily be done by implementing the interface
`org.apache.activemq.artemis.jms.bridge.ConnectionFactoryFactory`.
## Source and Target Destination Factories
Again, similarly, these are used to create or look up the
destinations.
Again, similarly, these are used to create or look up the destinations.
In the configuration example above, we have used the default provided by
Apache ActiveMQ Artemis that looks up the destination using JNDI.
In the configuration example above, we have used the default provided by Apache
ActiveMQ Artemis that looks up the destination using JNDI.
A new implementation can be provided by implementing
`org.apache.activemq.artemis.jms.bridge.DestinationFactory` interface.
## Quality Of Service
The quality of service modes used by the bridge are described here in
more detail.
The quality of service modes used by the bridge are described here in more
detail.
### AT_MOST_ONCE
With this QoS mode messages will reach the destination from the source
at most once. The messages are consumed from the source and acknowledged
before sending to the destination. Therefore there is a possibility that
if failure occurs between removing them from the source and them
arriving at the destination they could be lost. Hence delivery will
occur at most once.
With this QoS mode messages will reach the destination from the source at most
once. The messages are consumed from the source and acknowledged before sending
to the destination. Therefore there is a possibility that if failure occurs
between removing them from the source and them arriving at the destination they
could be lost. Hence delivery will occur at most once.
This mode is available for both durable and non-durable messages.
@ -240,71 +225,51 @@ This mode is available for both durable and non-durable messages.
With this QoS mode, the messages are consumed from the source and then
acknowledged after they have been successfully sent to the destination.
Therefore there is a possibility that if failure occurs after sending to
the destination but before acknowledging them, they could be sent again
when the system recovers. I.e. the destination might receive duplicates
after a failure.
Therefore there is a possibility that if failure occurs after sending to the
destination but before acknowledging them, they could be sent again when the
system recovers. I.e. the destination might receive duplicates after a failure.
This mode is available for both durable and non-durable messages.
### ONCE_AND_ONLY_ONCE
This QoS mode ensures messages will reach the destination from the
source once and only once. (Sometimes this mode is known as "exactly
once"). If both the source and the destination are on the same Apache ActiveMQ Artemis
server instance then this can be achieved by sending and acknowledging
the messages in the same local transaction. If the source and
destination are on different servers this is achieved by enlisting the
sending and consuming sessions in a JTA transaction. The JTA transaction
is controlled by a JTA Transaction Manager which will need to be set
via the setTransactionManager method on the Bridge.
This QoS mode ensures messages will reach the destination from the source once
and only once. (Sometimes this mode is known as "exactly once"). If both the
source and the destination are on the same Apache ActiveMQ Artemis server
instance then this can be achieved by sending and acknowledging the messages in
the same local transaction. If the source and destination are on different
servers this is achieved by enlisting the sending and consuming sessions in a
JTA transaction. The JTA transaction is controlled by a JTA Transaction Manager
which will need to be set via the setTransactionManager method on the Bridge.
This mode is only available for durable messages.
> **Note**
> **Note:**
>
> For a specific application it may be possible to provide once and only
> once semantics without using the ONCE\_AND\_ONLY\_ONCE QoS level. This
> can be done by using the DUPLICATES\_OK mode and then checking for
> duplicates at the destination and discarding them. Some JMS servers
> provide automatic duplicate message detection functionality, or this
> may be possible to implement on the application level by maintaining a
> cache of received message ids on disk and comparing received messages
> to them. The cache would only be valid for a certain period of time so
> this approach is not as watertight as using ONCE\_AND\_ONLY\_ONCE but
> may be a good choice depending on your specific application.
> For a specific application it may be possible to provide once and only once
> semantics without using the ONCE\_AND\_ONLY\_ONCE QoS level. This can be done
> by using the DUPLICATES\_OK mode and then checking for duplicates at the
> destination and discarding them. Some JMS servers provide automatic duplicate
> message detection functionality, or this may be possible to implement on the
> application level by maintaining a cache of received message ids on disk and
> comparing received messages to them. The cache would only be valid for a
> certain period of time so this approach is not as watertight as using
> ONCE\_AND\_ONLY\_ONCE but may be a good choice depending on your specific
> application.
### Time outs and the JMS bridge
There is a possibility that the target or source server will not be
available at some point in time. If this occurs then the bridge will try
`Max Retries` to reconnect every `Failure Retry Interval` milliseconds
as specified in the JMS Bridge definition.
There is a possibility that the target or source server will not be available
at some point in time. If this occurs then the bridge will try `Max Retries` to
reconnect every `Failure Retry Interval` milliseconds as specified in the JMS
Bridge definition.
However since a third party JNDI is used, in this case the JBoss naming
server, it is possible for the JNDI lookup to hang if the network were
to disappear during the JNDI lookup. To stop this from occurring the
JNDI definition can be configured to time out if this occurs. To do this
set the `jnp.timeout` and the `jnp.sotimeout` on the Initial Context
definition. The first sets the connection timeout for the initial
connection and the second the read timeout for the socket.
> **Note**
>
> Once the initial JNDI connection has succeeded all calls are made
> using RMI. If you want to control the timeouts for the RMI connections
> then this can be done via system properties. JBoss uses Sun's RMI and
> the properties can be found
> [here](https://docs.oracle.com/javase/8/docs/technotes/guides/rmi/sunrmiproperties.html).
> The default connection timeout is 10 seconds and the default read
> timeout is 18 seconds.
If you implement your own factories for looking up JMS resources then
you will have to bear in mind timeout issues.
If you implement your own factories for looking up JMS resources then you will
have to bear in mind timeout issues.
### Examples
Please see [the examples chapter](examples.md) which shows how to configure and use a JMS Bridge with
JBoss AS to send messages to the source destination and consume them
from the target destination and how to configure and use a JMS Bridge between
two standalone Apache ActiveMQ Artemis servers.
Please see [JMS Bridge Example](examples.md#jms-bridge) which shows how to
programmatically instantiate and configure a JMS Bridge to send messages to the
source destination and consume them from the target destination between two
standalone Apache ActiveMQ Artemis brokers.

View File

@ -1,15 +1,15 @@
# Mapping JMS Concepts to the Core API
This chapter describes how JMS destinations are mapped to Apache ActiveMQ Artemis
addresses.
This chapter describes how JMS destinations are mapped to Apache ActiveMQ
Artemis addresses.
Apache ActiveMQ Artemis core is JMS-agnostic. It does not have any concept of a JMS
topic. A JMS topic is implemented in core as an address with name=(the topic name)
and with a MULTICAST routing type with zero or more queues bound to it. Each queue bound to that address
represents a topic subscription.
Apache ActiveMQ Artemis core is JMS-agnostic. It does not have any concept of a
JMS topic. A JMS topic is implemented in core as an address with name=(the
topic name) and with a MULTICAST routing type with zero or more queues bound to
it. Each queue bound to that address represents a topic subscription.
Likewise, a JMS queue is implemented as an address with name=(the JMS queue name) with an ANYCAST routing type associated
with it.
Likewise, a JMS queue is implemented as an address with name=(the JMS queue
name) with an ANYCAST routing type associated with it.
Note. That whilst it is possible to configure a JMS topic and queue with the same name, it is not a recommended
configuration for use with cross protocol.
**Note:** While it is possible to configure a JMS topic and queue with the same
name, it is not a recommended configuration for use with cross protocol.

View File

@ -1,154 +1,128 @@
# Large Messages
Apache ActiveMQ Artemis supports sending and receiving of huge messages, even when the
client and server are running with limited memory. The only realistic
limit to the size of a message that can be sent or consumed is the
amount of disk space you have available. We have tested sending and
consuming messages up to 8 GiB in size with a client and server running
in just 50MiB of RAM!
Apache ActiveMQ Artemis supports sending and receiving of huge messages, even
when the client and server are running with limited memory. The only realistic
limit to the size of a message that can be sent or consumed is the amount of
disk space you have available. We have tested sending and consuming messages up
to 8 GiB in size with a client and server running in just 50MiB of RAM!
To send a large message, the user can set an `InputStream` on a message
body, and when that message is sent, Apache ActiveMQ Artemis will read the
`InputStream`. A `FileInputStream` could be used for example to send a
huge message from a huge file on disk.
To send a large message, the user can set an `InputStream` on a message body,
and when that message is sent, Apache ActiveMQ Artemis will read the
`InputStream`. A `FileInputStream` could be used for example to send a huge
message from a huge file on disk.
As the `InputStream` is read the data is sent to the server as a stream
of fragments. The server persists these fragments to disk as it receives
them and when the time comes to deliver them to a consumer they are read
back of the disk, also in fragments and sent down the wire. When the
consumer receives a large message it initially receives just the message
with an empty body, it can then set an `OutputStream` on the message to
stream the huge message body to a file on disk or elsewhere. At no time
is the entire message body stored fully in memory, either on the client
or the server.
As the `InputStream` is read the data is sent to the server as a stream of
fragments. The server persists these fragments to disk as it receives them and
when the time comes to deliver them to a consumer they are read back of the
disk, also in fragments and sent down the wire. When the consumer receives a
large message it initially receives just the message with an empty body, it can
then set an `OutputStream` on the message to stream the huge message body to a
file on disk or elsewhere. At no time is the entire message body stored fully
in memory, either on the client or the server.
## Configuring the server
Large messages are stored on a disk directory on the server side, as
configured on the main configuration file.
Large messages are stored on a disk directory on the server side, as configured
on the main configuration file.
The configuration property `large-messages-directory` specifies where
large messages are stored. For JDBC persistence the `large-message-table`
should be configured.
The configuration property `large-messages-directory` specifies where large
messages are stored. For JDBC persistence the `large-message-table` should be
configured.
```xml
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq /schema/artemis-server.xsd">
...
<large-messages-directory>/data/large-messages</large-messages-directory>
...
</configuration
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:activemq:core">
...
<large-messages-directory>/data/large-messages</large-messages-directory>
...
</core>
</configuration>
```
By default the large message directory is `data/largemessages` and `large-message-table` is
configured as "LARGE_MESSAGE_TABLE".
By default the large message directory is `data/largemessages` and
`large-message-table` is configured as "LARGE_MESSAGE_TABLE".
For the best performance we recommend using file store with large messages directory stored
on a different physical volume to the message journal or paging directory.
For the best performance we recommend using file store with large messages
directory stored on a different physical volume to the message journal or
paging directory.
## Configuring the Client
Any message larger than a certain size is considered a large message.
Large messages will be split up and sent in fragments. This is
determined by the URL parameter `minLargeMessageSize`
Any message larger than a certain size is considered a large message. Large
messages will be split up and sent in fragments. This is determined by the URL
parameter `minLargeMessageSize`
> **Note**
> **Note:**
>
> Apache ActiveMQ Artemis messages are encoded using 2 bytes per character so if the
> message data is filled with ASCII characters (which are 1 byte) the
> size of the resulting Apache ActiveMQ Artemis message would roughly double. This is
> important when calculating the size of a "large" message as it may
> appear to be less than the `minLargeMessageSize` before it is sent,
> but it then turns into a "large" message once it is encoded.
> Apache ActiveMQ Artemis messages are encoded using 2 bytes per character so
> if the message data is filled with ASCII characters (which are 1 byte) the
> size of the resulting Apache ActiveMQ Artemis message would roughly double.
> This is important when calculating the size of a "large" message as it may
> appear to be less than the `minLargeMessageSize` before it is sent, but it
> then turns into a "large" message once it is encoded.
The default value is 100KiB.
[Configuring the transport directly from the client side](configuring-transports.md)
will provide more information on how to instantiate the core session factory
or JMS connection factory.
[Configuring the transport directly from the client
side](configuring-transports.md#configuring-the-transport-directly-from-the-client)
will provide more information on how to instantiate the core session factory or
JMS connection factory.
## Compressed Large Messages
You can choose to send large messages in compressed form using
`compressLargeMessages` URL parameter.
If you specify the boolean URL parameter `compressLargeMessages` as true,
the system will use the ZIP algorithm to compress the message body as
the message is transferred to the server's side. Notice that there's no
special treatment at the server's side, all the compressing and uncompressing
is done at the client.
If you specify the boolean URL parameter `compressLargeMessages` as true, the
system will use the ZIP algorithm to compress the message body as the message
is transferred to the server's side. Notice that there's no special treatment
at the server's side, all the compressing and uncompressing is done at the
client.
If the compressed size of a large message is below `minLargeMessageSize`,
it is sent to server as regular messages. This means that the message won't
be written into the server's large-message data directory, thus reducing the
disk I/O.
If the compressed size of a large message is below `minLargeMessageSize`, it is
sent to server as regular messages. This means that the message won't be
written into the server's large-message data directory, thus reducing the disk
I/O.
## Streaming large messages
Apache ActiveMQ Artemis supports setting the body of messages using input and output
streams (`java.lang.io`)
Apache ActiveMQ Artemis supports setting the body of messages using input and
output streams (`java.lang.io`)
These streams are then used directly for sending (input streams) and
receiving (output streams) messages.
These streams are then used directly for sending (input streams) and receiving
(output streams) messages.
When receiving messages there are 2 ways to deal with the output stream;
you may choose to block while the output stream is recovered using the
method `ClientMessage.saveOutputStream` or alternatively using the
method `ClientMessage.setOutputstream` which will asynchronously write
the message to the stream. If you choose the latter the consumer must be
kept alive until the message has been fully received.
When receiving messages there are 2 ways to deal with the output stream; you
may choose to block while the output stream is recovered using the method
`ClientMessage.saveOutputStream` or alternatively using the method
`ClientMessage.setOutputstream` which will asynchronously write the message to
the stream. If you choose the latter the consumer must be kept alive until the
message has been fully received.
You can use any kind of stream you like. The most common use case is to
send files stored in your disk, but you could also send things like JDBC
Blobs, `SocketInputStream`, things you recovered from `HTTPRequests`
etc. Anything as long as it implements `java.io.InputStream` for sending
messages or `java.io.OutputStream` for receiving them.
You can use any kind of stream you like. The most common use case is to send
files stored in your disk, but you could also send things like JDBC Blobs,
`SocketInputStream`, things you recovered from `HTTPRequests` etc. Anything as
long as it implements `java.io.InputStream` for sending messages or
`java.io.OutputStream` for receiving them.
### Streaming over Core API
The following table shows a list of methods available at `ClientMessage`
which are also available through JMS by the use of object properties.
The following table shows a list of methods available at `ClientMessage` which
are also available through JMS by the use of object properties.
<table summary="org.hornetq.api.core.client.ClientMessage API" border="1">
<colgroup>
<col/>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
<th>JMS Equivalent</th>
</tr>
</thead>
<tbody>
<tr>
<td>setBodyInputStream(InputStream)</td>
<td>Set the InputStream used to read a message body when sending it.</td>
<td>JMS_AMQ_InputStream</td>
</tr>
<tr>
<td>setOutputStream(OutputStream)</td>
<td>Set the OutputStream that will receive the body of a message. This method does not block.</td>
<td>JMS_AMQ_OutputStream</td>
</tr>
<tr>
<td>saveOutputStream(OutputStream)</td>
<td>Save the body of the message to the `OutputStream`. It will block until the entire content is transferred to the `OutputStream`.</td>
<td>JMS_AMQ_SaveStream</td>
</tr>
</tbody>
</table>
Name | Description | JMS Equivalent
---|---|---
setBodyInputStream(InputStream)|Set the InputStream used to read a message body when sending it.|JMS_AMQ_InputStream
setOutputStream(OutputStream)|Set the OutputStream that will receive the body of a message. This method does not block.|JMS_AMQ_OutputStream
saveOutputStream(OutputStream)|Save the body of the message to the `OutputStream`. It will block until the entire content is transferred to the `OutputStream`.|JMS_AMQ_SaveStream
To set the output stream when receiving a core message:
``` java
ClientMessage msg = consumer.receive(...);
// This will block here until the stream was transferred
msg.saveOutputStream(someOutputStream);
@ -165,16 +139,15 @@ ClientMessage msg = session.createMessage();
msg.setInputStream(dataInputStream);
```
Notice also that for messages with more than 2GiB the getBodySize() will
return invalid values since this is an integer (which is also exposed to
the JMS API). On those cases you can use the message property
_AMQ_LARGE_SIZE.
Notice also that for messages with more than 2GiB the getBodySize() will return
invalid values since this is an integer (which is also exposed to the JMS API).
In those cases you can use the message property _AMQ_LARGE_SIZE.
### Streaming over JMS
When using JMS, Apache ActiveMQ Artemis maps the streaming methods on the core API (see
ClientMessage API table above) by setting object properties. You can use the method
`Message.setObjectProperty` to set the input and output streams.
When using JMS, Apache ActiveMQ Artemis maps the streaming methods on the core
API (see ClientMessage API table above) by setting object properties. You can
use the method `Message.setObjectProperty` to set the input and output streams.
The `InputStream` can be defined through the JMS Object Property
JMS_AMQ_InputStream on messages being sent:
@ -215,16 +188,16 @@ using the property JMS_AMQ_OutputStream.
messageReceived.setObjectProperty("JMS_AMQ_OutputStream", bufferedOutput);
```
> **Note**
> **Note:**
>
> When using JMS, Streaming large messages are only supported on
> `StreamMessage` and `BytesMessage`.
### Streaming Alternative
If you choose not to use the `InputStream` or `OutputStream` capability
of Apache ActiveMQ Artemis You could still access the data directly in an alternative
fashion.
If you choose not to use the `InputStream` or `OutputStream` capability of
Apache ActiveMQ Artemis you could still access the data directly in an
alternative fashion.
On the Core API just get the bytes of the body as you normally would.
@ -241,6 +214,7 @@ for (int i = 0 ; i < msg.getBodySize(); i += bytes.length)
If using JMS API, `BytesMessage` and `StreamMessage` also support it
transparently.
``` java
BytesMessage rm = (BytesMessage)cons.receive(10000);
@ -255,5 +229,5 @@ for (int i = 0; i < rm.getBodyLength(); i += 1024)
## Large message example
Please see the [examples](examples.md) chapter for an example which shows
how large message is configured and used with JMS.
Please see the [Large Message Example](examples.md#large-message) which shows
how large messages are configured and used with JMS.

View File

@ -14,26 +14,26 @@ Last-Value queues can be statically configured via the `last-value`
boolean property:
```xml
<configuration ...>
<core ...>
...
<address name="foo.bar">
<multicast>
<queue name="orders1" last-value="true"/>
</multicast>
</address>
</core>
</configuration>
<address name="foo.bar">
<multicast>
<queue name="orders1" last-value="true"/>
</multicast>
</address>
```
Specified on creating a Queue by using the CORE api specifying the parameter `lastValue` to `true`.
Specified on creating a queue by using the CORE api specifying the parameter
`lastValue` to `true`.
Or on auto-create when using the JMS Client by using address parameters when creating the destination used by the consumer.
Or on auto-create when using the JMS Client by using address parameters when
creating the destination used by the consumer.
Queue queue = session.createQueue("my.destination.name?last-value=true");
Topic topic = session.createTopic("my.destination.name?last-value=true");
```java
Queue queue = session.createQueue("my.destination.name?last-value=true");
Topic topic = session.createTopic("my.destination.name?last-value=true");
```
Also the default for all queues under and address can be defaulted using the address-setting configuration:
Also the default for all queues under an address can be set using the
`address-setting` configuration:
```xml
<address-setting match="lastValueQueue">
@ -45,7 +45,8 @@ By default, `default-last-value-queue` is false.
Address wildcards can be used to configure Last-Value queues
for a set of addresses (see [here](wildcard-syntax.md)).
Note that address-setting `last-value-queue` config is deprecated, please use `default-last-value-queue` instead.
Note that `address-setting` `last-value-queue` config is deprecated, please use
`default-last-value-queue` instead.
## Last-Value Property
@ -77,5 +78,5 @@ System.out.format("Received message: %s\n", messageReceived.getText());
## Example
See the [examples](examples.md) chapter for an example which shows how last value queues are configured
and used with JMS.
See the [last-value queue example](examples.md#last-value-queue) which shows
how last value queues are configured and used with JMS.

View File

@ -13,8 +13,8 @@ please see [Persistence](persistence.md).
These are the native libraries distributed by Apache ActiveMQ Artemis:
- libartemis-native-64.so - x86 64 bits
- We distributed a 32-bit version until early 2017. While it's not available on the distribution any longer it should still be possible to compile to a 32-bit environment if needed.
- libartemis-native-64.so - x86 64 bits
- We distributed a 32-bit version until early 2017. While it's not available on the distribution any longer it should still be possible to compile to a 32-bit environment if needed.
When using libaio, Apache ActiveMQ Artemis will always try loading these files as long
as they are on the [library path](using-server.md#library-path)
@ -28,12 +28,15 @@ You can install libaio using the following steps as the root user:
Using yum, (e.g. on Fedora or Red Hat Enterprise Linux):
yum install libaio
```
yum install libaio
```
Using aptitude, (e.g. on Ubuntu or Debian system):
apt-get install libaio
```
apt-get install libaio
```
## Compiling the native libraries
@ -44,26 +47,26 @@ those platforms with the release.
## Compilation dependencies
> **Note**
> **Note:**
>
> The native layer is only available on Linux. If you are
> on a platform other than Linux the native compilation will not work
These are the required linux packages to be installed for the compilation to work:
- gcc - C Compiler
- gcc - C Compiler
- gcc-c++ or g++ - Extension to gcc with support for C++
- gcc-c++ or g++ - Extension to gcc with support for C++
- libtool - Tool for link editing native libraries
- libtool - Tool for link editing native libraries
- libaio - library to disk asynchronous IO kernel functions
- libaio - library to disk asynchronous IO kernel functions
- libaio-dev - Compilation support for libaio
- libaio-dev - Compilation support for libaio
- cmake
- cmake
- A full JDK installed with the environment variable JAVA\_HOME set to
- A full JDK installed with the environment variable JAVA\_HOME set to
its location
To perform this installation on RHEL or Fedora, you can simply type this at a command line:
@ -74,7 +77,7 @@ Or on Debian systems:
sudo apt-get install libtool gcc-g++ gcc libaio libaio-dev cmake
> **Note**
> **Note:**
>
> You could find a slight variation of the package names depending on
> the version and Linux distribution. (for example gcc-c++ on Fedora

View File

@ -7,63 +7,34 @@ the console and to a file.
There are 6 loggers available which are as follows:
<table summary="Loggers" border="1">
<colgroup>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Logger</th>
<th>Logger Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>org.jboss.logging</td>
<td>Logs any calls not handled by the Apache ActiveMQ Artemis loggers</td>
</tr>
<tr>
<td>org.apache.activemq.artemis.core.server</td>
<td>Logs the core server</td>
</tr>
<tr>
<td>org.apache.activemq.artemis.utils</td>
<td>Logs utility calls</td>
</tr>
<tr>
<td>org.apache.activemq.artemis.journal</td>
<td>Logs Journal calls</td>
</tr>
<tr>
<td>org.apache.activemq.artemis.jms</td>
<td>Logs JMS calls</td>
</tr>
<tr>
<td>org.apache.activemq.artemis.integration.bootstrap </td>
<td>Logs bootstrap calls</td>
</tr>
</tbody>
</table>
Logger | Description
---|---
org.jboss.logging|Logs any calls not handled by the Apache ActiveMQ Artemis loggers
org.apache.activemq.artemis.core.server|Logs the core server
org.apache.activemq.artemis.utils|Logs utility calls
org.apache.activemq.artemis.journal|Logs Journal calls
org.apache.activemq.artemis.jms|Logs JMS calls
org.apache.activemq.artemis.integration.bootstrap|Logs bootstrap calls
: Global Configuration Properties
## Logging in a client or with an Embedded server
Firstly, if you want to enable logging on the client side you need to
include the JBoss logging jars in your library. If you are using maven
add the following dependencies.
include the JBoss logging jars in your library. If you are using Maven
the simplest way is to use the "all" client jar.
<dependency>
<groupId>org.jboss.logmanager</groupId>
<artifactId>jboss-logmanager</artifactId>
<version>1.5.3.Final</version>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-core-client</artifactId>
<version>1.0.0.Final</version>
</dependency>
```xml
<dependency>
<groupId>org.jboss.logmanager</groupId>
<artifactId>jboss-logmanager</artifactId>
<version>2.0.3.Final</version>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-core-client</artifactId>
<version>2.5.0</version>
</dependency>
```
There are 2 properties you need to set when starting your java program,
the first is to set the Log Manager to use the JBoss Log Manager, this
@ -74,41 +45,43 @@ The second is to set the location of the logging.properties file to use,
this is done via the `-Dlogging.configuration` for instance
`-Dlogging.configuration=file:///home/user/projects/myProject/logging.properties`.
> **Note**
> **Note:**
>
> The value for this needs to be valid URL
> The `logging.configuration` system property needs to be a valid URL
The following is a typical `logging.properties` for a client
# Root logger option
loggers=org.jboss.logging,org.apache.activemq.artemis.core.server,org.apache.activemq.artemis.utils,org.apache.activemq.artemis.journal,org.apache.activemq.artemis.jms,org.apache.activemq.artemis.ra
```
# Root logger option
loggers=org.jboss.logging,org.apache.activemq.artemis.core.server,org.apache.activemq.artemis.utils,org.apache.activemq.artemis.journal,org.apache.activemq.artemis.jms,org.apache.activemq.artemis.ra
# Root logger level
logger.level=INFO
# Apache ActiveMQ Artemis logger levels
logger.org.apache.activemq.artemis.core.server.level=INFO
logger.org.apache.activemq.artemis.utils.level=INFO
logger.org.apache.activemq.artemis.jms.level=DEBUG
# Root logger level
logger.level=INFO
# Apache ActiveMQ Artemis logger levels
logger.org.apache.activemq.artemis.core.server.level=INFO
logger.org.apache.activemq.artemis.utils.level=INFO
logger.org.apache.activemq.artemis.jms.level=DEBUG
# Root logger handlers
logger.handlers=FILE,CONSOLE
# Root logger handlers
logger.handlers=FILE,CONSOLE
# Console handler configuration
handler.CONSOLE=org.jboss.logmanager.handlers.ConsoleHandler
handler.CONSOLE.properties=autoFlush
handler.CONSOLE.level=FINE
handler.CONSOLE.autoFlush=true
handler.CONSOLE.formatter=PATTERN
# Console handler configuration
handler.CONSOLE=org.jboss.logmanager.handlers.ConsoleHandler
handler.CONSOLE.properties=autoFlush
handler.CONSOLE.level=FINE
handler.CONSOLE.autoFlush=true
handler.CONSOLE.formatter=PATTERN
# File handler configuration
handler.FILE=org.jboss.logmanager.handlers.FileHandler
handler.FILE.level=FINE
handler.FILE.properties=autoFlush,fileName
handler.FILE.autoFlush=true
handler.FILE.fileName=activemq.log
handler.FILE.formatter=PATTERN
# File handler configuration
handler.FILE=org.jboss.logmanager.handlers.FileHandler
handler.FILE.level=FINE
handler.FILE.properties=autoFlush,fileName
handler.FILE.autoFlush=true
handler.FILE.fileName=activemq.log
handler.FILE.formatter=PATTERN
# Formatter pattern configuration
formatter.PATTERN=org.jboss.logmanager.formatters.PatternFormatter
formatter.PATTERN.properties=pattern
formatter.PATTERN.pattern=%d{HH:mm:ss,SSS} %-5p [%c] %s%E%n
# Formatter pattern configuration
formatter.PATTERN=org.jboss.logmanager.formatters.PatternFormatter
formatter.PATTERN.properties=pattern
formatter.PATTERN.pattern=%d{HH:mm:ss,SSS} %-5p [%c] %s%E%n
```

View File

@ -4,7 +4,6 @@ Apache ActiveMQ Artemis ships by default with a management console. It is powere
Its purpose is to expose the [Management API](management.md "Management API") via a user friendly web ui.
## Login
To access the management console use a browser and go to the URL <http://localhost:8161/console>.
@ -30,29 +29,27 @@ Once logged in you should be presented with a screen similar to.
On the top right is small menu area you will see some icons.
- `question mark` This will load the artemis documentation in the console main window
- `person` will provide a drop down menu with
- - `about` this will load an about screen, here you will be able to see and validate versions
- - `log out` self descriptive.
- `question mark` This will load the artemis documentation in the console main window
- `person` will provide a drop down menu with
- `about` this will load an about screen, here you will be able to see and validate versions
- `log out` self descriptive.
#### Navigation Tabs
Running below the Navigation Menu you will see several default feature tabs.
- `Artemis` This is the core tab for Apache ActiveMQ Artemis specific functionality. The rest of this document will focus on this.
- `Artemis` This is the core tab for Apache ActiveMQ Artemis specific functionality. The rest of this document will focus on this.
- `Connect` This allows you to connect to a remote broker from the same console.
- `Connect` This allows you to connect to a remote broker from the same console.
- `Dashboard` Here you can create and save graphs and tables of metrics available via JMX, a default jvm health dashboard is provided.
- `Dashboard` Here you can create and save graphs and tables of metrics available via JMX, a default jvm health dashboard is provided.
- `JMX` This exposes the raw Jolokia JMX so you can browse/access all the JMX endpoints exposed by the JVM.
- `JMX` This exposes the raw Jolokia JMX so you can browse/access all the JMX endpoints exposed by the JVM.
- `Threads` This allows you to monitor the thread usage and their state.
- `Threads` This allows you to monitor the thread usage and their state.
You can install further hawtio plugins if you wish to have further functionality.
## Artemis Tab
Click `Artemis` in the top navigation bar to see the Artemis specific plugin. (The Artemis tab won't appear if there is no broker in this JVM). The Artemis plugin works very much the same as the JMX plugin however with a focus on interacting with an Artemis broker.
@ -71,8 +68,6 @@ This expands to show the current configured available `addresses`.
Under the address you can expand to find the `queues` for the address exposing attributes
### Key Operations
#### Creating a new Address

File diff suppressed because it is too large Load Diff

View File

@ -1,65 +1,67 @@
# Masking Passwords
By default all passwords in Apache ActiveMQ Artemis server's configuration files are in
plain text form. This usually poses no security issues as those files
should be well protected from unauthorized accessing. However, in some
circumstances a user doesn't want to expose its passwords to more eyes
than necessary.
By default all passwords in Apache ActiveMQ Artemis server's configuration
files are in plain text form. This usually poses no security issues as those
files should be well protected from unauthorized access. However, in some
circumstances a user doesn't want to expose its passwords to more eyes than
necessary.
Apache ActiveMQ Artemis can be configured to use 'masked' passwords in its
configuration files. A masked password is an obscure string
representation of a real password. To mask a password a user will use an
'encoder'. The encoder takes in the real password and outputs the masked
version. A user can then replace the real password in the configuration
files with the new masked password. When Apache ActiveMQ Artemis loads a masked
password, it uses a suitable 'decoder' to decode it into real password.
configuration files. A masked password is an obscure string representation of a
real password. To mask a password a user will use an 'encoder'. The encoder
takes in the real password and outputs the masked version. A user can then
replace the real password in the configuration files with the new masked
password. When Apache ActiveMQ Artemis loads a masked password, it uses a
suitable 'decoder' to decode it into the real password.
Apache ActiveMQ Artemis provides a default password encoder and decoder. Optionally
users can use or implement their own encoder and decoder for masking the
passwords.
Apache ActiveMQ Artemis provides a default password encoder and decoder.
Optionally users can use or implement their own encoder and decoder for masking
the passwords.
In general, a masked password can be identified using one of two ways. The first one
is the ENC() syntax, i.e. any string value wrapped in ENC() is to be treated as
a masked password. For example
In general, a masked password can be identified using one of two ways. The
first one is the `ENC()` syntax, i.e. any string value wrapped in `ENC()` is to
be treated as a masked password. For example
`ENC(xyz)`
The above indicates that the password is masked and the masked value is `xyz`.
The ENC() syntax is the preferred way to indicating a masked password and is
The `ENC()` syntax is the **preferred way** of masking a password and is
universally supported in every password configuration in Artemis.
The other way is to use a `mask-password` attribute to tell that a password
in a configuration file should be treated as 'masked'. For example:
The other way is to use a `mask-password` attribute to tell that a password in
a configuration file should be treated as 'masked'. For example:
```xml
<mask-password>true</mask-password>
<cluster-password>xyz</cluster-password>
```
This method is now deprecated and exists only to maintain backward-compatibility.
Newer configurations may not support it.
This method is now **deprecated** and exists only to maintain
backward-compatibility. Newer configurations may not support it.
### Password Masking in Server Configuration File
#### General Masking Configuration
Besides supporting the ENC() syntax, the server configuration file (i.e. broker.xml)
has a property that defines the default masking behaviors over the entire file scope.
Besides supporting the `ENC()` syntax, the server configuration file (i.e.
broker.xml) has a property that defines the default masking behaviors over the
entire file scope.
`mask-password`: this boolean type property indicates if a password
should be masked or not. Set it to "true" if you want your passwords
masked. The default value is "false".
`mask-password`: this boolean type property indicates if a password should be
masked or not. Set it to "true" if you want your passwords masked. The default
value is "false".
`password-codec`: this string type property identifies the name of the class
which will be used to decode the masked password within the broker. If not
specified then the default `org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec`
will be used.
specified then the default
`org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec` will be used.
#### Specific Masking Behaviors
##### cluster-password
If it is specified in ENC() syntax it will be treated as masked, or
If it is specified in `ENC()` syntax it will be treated as masked, or
If `mask-password` is `true` the `cluster-password` will be treated as masked.
@ -73,76 +75,77 @@ will have different password masking needs.
When a `connector` or `acceptor` is initialised, Apache ActiveMQ Artemis will
add the aforementioned `mask-password` and `password-codec` values to the
`connector` or `acceptor` parameters using the keys `activemq.usemaskedpassword`
and `activemq.passwordcodec` respectively. The Netty and InVM implementations
will use these as needed and any other implementations will have access to
these to use if they so wish.
`connector` or `acceptor` parameters using the keys
`activemq.usemaskedpassword` and `activemq.passwordcodec` respectively. The
Netty and InVM implementations will use these as needed and any other
implementations will have access to these to use if they so wish.
The preferred way, however, is to use the ENC() syntax.
The preferred way, however, is to use the `ENC()` syntax.
##### Core Bridges
Core Bridges are configured in the server configuration file and so the
masking of its `password` properties follows the same rules as that of
`cluster-password`. It supports ENC() syntax.
Core Bridges are configured in the server configuration file and so the masking
of its `password` properties follows the same rules as that of
`cluster-password`. It supports `ENC()` syntax.
For using `mask-password` property, the following table summarizes the
For using `mask-password` property, the following table summarizes the
relations among the above-mentioned properties
mask-password | cluster-password | acceptor/connector passwords | bridge password
:------------- | :---------------- | :--------------------------- | :---------------
absent | plain text | plain text | plain text
false | plain text | plain text | plain text
true | masked | masked | masked
mask-password | cluster-password | acceptor/connector passwords | bridge password
--- | --- | --- | ---
absent|plain text|plain text|plain text
false|plain text|plain text|plain text
true|masked|masked|masked
It is recommended that you use the `ENC()` syntax for new applications/deployments.
It is recommended that you use the `ENC()` syntax for new
applications/deployments.
#### Examples
Note: In the following examples if related attributed or properties are
**Note:** In the following examples if related attributes or properties are
absent, it means they are not specified in the configuration file.
example 1
- Unmasked
```xml
<cluster-password>bbc</cluster-password>
```
```xml
<cluster-password>bbc</cluster-password>
```
This indicates the cluster password is a plain text value ("bbc").
This indicates the cluster password is a plain text value `bbc`.
example 2
- Masked 1
```xml
<cluster-password>ENC(xyz)</cluster-password>
```
```xml
<cluster-password>ENC(xyz)</cluster-password>
```
This indicates the cluster password is a masked value ("xyz").
This indicates the cluster password is a masked value `xyz`.
example 3
- Masked 2
```xml
<mask-password>true</mask-password>
<cluster-password>80cf731af62c290</cluster-password>
```
```xml
<mask-password>true</mask-password>
<cluster-password>80cf731af62c290</cluster-password>
```
This indicates the cluster password is a masked value and Apache ActiveMQ Artemis will
use its built-in decoder to decode it. All other passwords in the
configuration file, Connectors, Acceptors and Bridges, will also use
masked passwords.
This indicates the cluster password is a masked value and Apache ActiveMQ
Artemis will use its built-in decoder to decode it. All other passwords in the
configuration file, Connectors, Acceptors and Bridges, will also use masked
passwords.
#### Passwords in bootstrap.xml
The broker embeds a web-server for hosting some web applications such as a
management console. It is configured in bootstrap.xml as a web
component. The web server can be secured using https protocol, and it can be
configured with a keystore password and/or truststore password which by
default are specified in plain text forms.
management console. It is configured in bootstrap.xml as a web component. The
web server can be secured using https protocol, and it can be configured with a
keystore password and/or truststore password which by default are specified in
plain text forms.
To mask these passwords you need to use ENC() syntax. The `mask-password` is
not supported here.
To mask these passwords you need to use `ENC()` syntax. The `mask-password`
boolean is not supported here.
You can also set the `passwordCodec` attribute if you want to use a password codec
other than the default one. For example
You can also set the `passwordCodec` attribute if you want to use a password
codec other than the default one. For example
```xml
<web bind="https://localhost:8443" path="web"
@ -154,24 +157,23 @@ other than the default one. For example
### Passwords for the JCA Resource Adapter
Both ra.xml and MDB activation configuration have a `password` property
that can be masked preferably using ENC() syntax.
Both ra.xml and MDB activation configuration have a `password` property that
can be masked preferably using `ENC()` syntax.
Alternatively it can use a optional attribute in ra.xml to indicate that a password
is masked:
Alternatively it can use an optional attribute in ra.xml to indicate that a
password is masked:
`UseMaskedPassword` -- If setting to "true" the passwords are masked.
Default is false.
`UseMaskedPassword` -- If setting to "true" the passwords are masked. Default
is false.
There is another property in ra.xml that can specify a codec:
`PasswordCodec` -- Class name and its parameters for the Decoder used to
decode the masked password. Ignored if UseMaskedPassword is false. The
format of this property is a full qualified class name optionally
followed by key/value pairs. It is the same format as that for JMS
Bridges. Example:
`PasswordCodec` -- Class name and its parameters for the Decoder used to decode
the masked password. Ignored if UseMaskedPassword is false. The format of this
property is a fully qualified class name optionally followed by key/value pairs.
It is the same format as that for JMS Bridges. Example:
Example 1 Using the ENC() syntax:
Example 1 Using the `ENC()` syntax:
```xml
<config-property>
@ -206,16 +208,17 @@ Example 2 Using the "UseMaskedPassword" property:
</config-property>
```
With this configuration, both passwords in ra.xml and all of its MDBs
will have to be in masked form.
With this configuration, both passwords in ra.xml and all of its MDBs will have
to be in masked form.
### Passwords in artemis-users.properties
Apache ActiveMQ Artemis's built-in security manager uses plain properties files
where the user passwords are specified in a hashed form by default. Note, the passwords
are technically *hashed* rather than masked in this context. The default `PropertiesLoginModule`
will not decode the passwords in `artemis-users.properties` but will instead hash the input
and compare the two hashed values for password verification.
where the user passwords are specified in a hashed form by default. Note, the
passwords are technically *hashed* rather than masked in this context. The
default `PropertiesLoginModule` will not decode the passwords in
`artemis-users.properties` but will instead hash the input and compare the two
hashed values for password verification.
Please use Artemis CLI command to add a password. For example:
@ -223,26 +226,27 @@ Please use Artemis CLI command to add a password. For example:
./artemis user add --username guest --password guest --role admin
```
This will use the default `org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec`
to perform a "one-way" hash of the password and alter both the `artemis-users.properties`
This will use the default
`org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec` to perform a
"one-way" hash of the password and alter both the `artemis-users.properties`
and `artemis-roles.properties` files with the specified values.
Passwords in `artemis-users.properties` are automatically detected as hashed or not
by looking for the syntax `ENC(<hash>)`. The `mask-password` parameter does not need
to be `true` to use hashed passwords here.
Passwords in `artemis-users.properties` are automatically detected as hashed or
not by looking for the syntax `ENC(<hash>)`. The `mask-password` parameter does
not need to be `true` to use hashed passwords here.
### Password in login.config
Artemis supports LDAP login modules to be configured in JAAS configuration
file (default name is `login.config`). When connecting to a LDAP server usually
you need to supply a connection password in the config file. By default this
Artemis supports LDAP login modules to be configured in JAAS configuration file
(default name is `login.config`). When connecting to a LDAP server usually you
need to supply a connection password in the config file. By default this
password is in plain text form.
To mask it you need to configure the passwords in your login module
using ENC() syntax. To specify a codec using the following property:
To mask it you need to configure the passwords in your login module using
`ENC()` syntax. To specify a codec use the following property:
`passwordCodec` - the password codec class name. (the default codec
will be used if it is absent)
`passwordCodec` - the password codec class name. (the default codec will be
used if it is absent)
For example:
@ -270,24 +274,24 @@ LDAPLoginExternalPasswordCodec {
### Choosing a decoder for password masking
As described in the previous sections, all password masking requires a
decoder. A decoder uses an algorithm to convert a masked password into
its original clear text form in order to be used in various security
operations. The algorithm used for decoding must match that for
encoding. Otherwise the decoding may not be successful.
As described in the previous sections, all password masking requires a decoder.
A decoder uses an algorithm to convert a masked password into its original
clear text form in order to be used in various security operations. The
algorithm used for decoding must match that for encoding. Otherwise the
decoding may not be successful.
For user's convenience Apache ActiveMQ Artemis provides a default decoder.
However a user can implement their own if they wish.
#### The Default Decoder
Whenever no decoder is specified in the configuration file, the default
decoder is used. The class name for the default decoder is
`org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec`. It has hashing,
encoding, and decoding capabilities. It uses `java.crypto.Cipher` utilities
to hash or encode a plaintext password and also to decode a masked string using
same algorithm and key. Using this decoder/encoder is pretty straightforward. To
get a mask for a password, just run the `mask` command:
Whenever no decoder is specified in the configuration file, the default decoder
is used. The class name for the default decoder is
`org.apache.activemq.artemis.utils.DefaultSensitiveStringCodec`. It has
hashing, encoding, and decoding capabilities. It uses `java.crypto.Cipher`
utilities to hash or encode a plaintext password and also to decode a masked
string using same algorithm and key. Using this decoder/encoder is pretty
straightforward. To get a mask for a password, just run the `mask` command:
```sh
./artemis mask <plaintextPassword>
@ -304,18 +308,19 @@ plaintext password in broker.xml with it.
#### Using a custom decoder
It is possible to use a custom decoder rather than the built-in one.
Simply make sure the decoder is in Apache ActiveMQ Artemis's classpath. The custom decoder
can also be service loaded rather than class loaded, if the decoder's service provider is installed in the classpath.
Then configure the server to use it as follows:
It is possible to use a custom decoder rather than the built-in one. Simply
make sure the decoder is in Apache ActiveMQ Artemis's classpath. The custom
decoder can also be service loaded rather than class loaded, if the decoder's
service provider is installed in the classpath. Then configure the server to
use it as follows:
```xml
<password-codec>com.foo.SomeDecoder;key1=value1;key2=value2</password-codec>
```
If your decoder needs params passed to it you can do this via key/value
pairs when configuring. For instance if your decoder needs say a
"key-location" parameter, you can define like so:
If your decoder needs params passed to it you can do this via key/value pairs
when configuring. For instance if your decoder needs say a "key-location"
parameter, you can define it like so:
```xml
<password-codec>com.foo.NewDecoder;key-location=/some/url/to/keyfile</password-codec>
@ -328,15 +333,14 @@ Then configure your cluster-password like this:
```
When Apache ActiveMQ Artemis reads the cluster-password it will initialize the
NewDecoder and use it to decode "mask\_password". It also processes all
passwords using the newly defined decoder.
NewDecoder and use it to decode "mask\_password". It also processes all passwords
using the newly defined decoder.
#### Implementing Custom Codecs
To use a different decoder than the built-in one, you either pick one
from existing libraries or you implement it yourself. All decoders must
implement the `org.apache.activemq.artemis.utils.SensitiveDataCodec<T>`
interface:
To use a different decoder than the built-in one, you either pick one from
existing libraries or you implement it yourself. All decoders must implement
the `org.apache.activemq.artemis.utils.SensitiveDataCodec<T>` interface:
```java
public interface SensitiveDataCodec<T>
@ -347,8 +351,8 @@ public interface SensitiveDataCodec<T>
}
```
This is a generic type interface but normally for a password you just
need String type. So a new decoder would be defined like
This is a generic type interface but normally for a password you just need
String type. So a new decoder would be defined like
```java
public class MyNewDecoder implements SensitiveDataCodec<String>
@ -367,4 +371,5 @@ public class MyNewDecoder implements SensitiveDataCodec<String>
```
Last but not least, once you get your own decoder please [add it to the
classpath](using-server.md#adding-runtime-dependencies) otherwise the broker will fail to load it!
classpath](using-server.md#adding-runtime-dependencies) otherwise the broker
will fail to load it!

View File

@ -13,17 +13,17 @@ You could for example use these maven plugins on your testsuite or deployment au
There are three goals that you can use
- create
- `create`
This will create a server according to your arguments. You can do some extra tricks here such as installing extra libraries for external modules.
This will create a server according to your arguments. You can do some extra tricks here such as installing extra libraries for external modules.
- cli
- `cli`
This will perform any CLI operation. This is basically a maven expression of the CLI classes
This will perform any CLI operation. This is basically a maven expression of the CLI classes
- runClient
- `runClient`
This is a simple wrapper around classes implementing a static main call. Notice that this won't spawn a new VM or new Thread.
This is a simple wrapper around classes implementing a static main call. Notice that this won't spawn a new VM or new Thread.
## Declaration
@ -31,11 +31,14 @@ This is a simple wrapper around classes implementing a static main call. Notice
On your pom, use the plugins section:
```xml
<build>
<plugins>
<plugin>
<groupId>org.apache.activemq</groupId>
<artifactId>artemis-maven-plugin</artifactId>
<build>
<plugins>
<plugin>
<groupId>org.apache.activemq</groupId>
<artifactId>artemis-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
```
## create goal
@ -43,7 +46,7 @@ On your pom, use the plugins section:
I won't detail every operation of the create plugin here, but I will try to describe the main parameters:
Name | Description
:--- | :---
--- | ---
configuration | A place that will hold any file to replace on the configuration. For instance if you are providing your own broker.xml. Default is "${basedir}/target/classes/activemq/server0"
home | The location where you downloaded and installed artemis. Default is "${activemq.basedir}"
alternateHome | This is used in case you have two possible locations for your home (e.g. one under compile and one under production)
@ -54,16 +57,15 @@ liblist[] | A list of libraries to be installed under ./lib. ex: "org.jgroups:jg
Example:
```xml
<executions>
<execution>
<id>create</id>
<goals>
<goal>create</goal>
</goals>
<configuration>
<ignore>${noServer}</ignore>
</configuration>
</execution>
<execution>
<id>create</id>
<goals>
<goal>create</goal>
</goals>
<configuration>
<ignore>${noServer}</ignore>
</configuration>
</execution>
```
@ -72,7 +74,7 @@ Example:
Some properties for the CLI
Name | Description
:--- | :---
--- | ---
configuration | A place that will hold any file to replace on the configuration. For instance if you are providing your own broker.xml. Default is "${basedir}/target/classes/activemq/server0"
home | The location where you downloaded and installed artemis. Default is "${activemq.basedir}"
alternateHome | This is used in case you have two possible locations for your home (e.g. one under compile and one under production)
@ -105,7 +107,7 @@ Example:
This is a simple solution for running classes implementing the main method.
Name | Description
:--- | :---
--- | ---
clientClass | A class implementing a static void main(String[] args) method
args | A string array of arguments passed to the method

View File

@ -2,19 +2,19 @@
Messages can be set with an optional *time to live* when sending them.
Apache ActiveMQ Artemis will not deliver a message to a consumer after its time to
live has been exceeded. If the message hasn't been delivered by the time
that time to live is reached the server can discard it.
Apache ActiveMQ Artemis will not deliver a message to a consumer after its
time to live has been exceeded. If the message hasn't been delivered by the
time that time to live is reached the server can discard it.
Apache ActiveMQ Artemis's addresses can be assigned an expiry address so that, when
messages are expired, they are removed from the queue and sent to the
expiry address. Many different queues can be bound to an expiry address.
These *expired* messages can later be consumed for further inspection.
Apache ActiveMQ Artemis's addresses can be assigned an expiry address so that,
when messages are expired, they are removed from the queue and sent to the
expiry address. Many different queues can be bound to an expiry address. These
*expired* messages can later be consumed for further inspection.
## Core API
Using Apache ActiveMQ Artemis Core API, you can set an expiration time directly on the
message:
Using Apache ActiveMQ Artemis Core API, you can set an expiration time directly
on the message:
```java
// message will expire in 5000ms from now
@ -28,23 +28,23 @@ JMS MessageProducer allows to set a TimeToLive for the messages it sent:
producer.setTimeToLive(5000);
```
Expired messages which are consumed from an expiry address have the
following properties:
Expired messages which are consumed from an expiry address have the following
properties:
- `_AMQ_ORIG_ADDRESS`
- `_AMQ_ORIG_ADDRESS`
a String property containing the *original address* of the expired
message
a String property containing the *original address* of the expired
message
- `_AMQ_ORIG_QUEUE`
- `_AMQ_ORIG_QUEUE`
a String property containing the *original queue* of the expired
message
a String property containing the *original queue* of the expired
message
- `_AMQ_ACTUAL_EXPIRY`
- `_AMQ_ACTUAL_EXPIRY`
a Long property containing the *actual expiration time* of the
expired message
a Long property containing the *actual expiration time* of the
expired message
## Configuring Expiry Addresses
@ -57,29 +57,29 @@ Expiry address are defined in the address-setting configuration:
</address-setting>
```
If messages are expired and no expiry address is specified, messages are
simply removed from the queue and dropped. Address wildcards can be used
to configure expiry address for a set of addresses (see [Understanding the Wildcard Syntax](wildcard-syntax.md)).
If messages are expired and no expiry address is specified, messages are simply
removed from the queue and dropped. Address [wildcards](wildcard-syntax.md) can
be used to configure expiry address for a set of addresses.
## Configuring The Expiry Reaper Thread
A reaper thread will periodically inspect the queues to check if
messages have expired.
A reaper thread will periodically inspect the queues to check if messages have
expired.
The reaper thread can be configured with the following properties in
`broker.xml`
- `message-expiry-scan-period`
- `message-expiry-scan-period`
How often the queues will be scanned to detect expired messages (in
milliseconds, default is 30000ms, set to `-1` to disable the reaper
thread)
How often the queues will be scanned to detect expired messages (in
milliseconds, default is 30000ms, set to `-1` to disable the reaper thread)
- `message-expiry-thread-priority`
- `message-expiry-thread-priority`
The reaper thread priority (it must be between 1 and 10, 10 being the
highest priority, default is 3)
The reaper thread priority (it must be between 1 and 10, 10 being the highest
priority, default is 3)
## Example
See the [examples.md](examples.md) chapter for an example which shows how message expiry is configured and used with JMS.
See the [Message Expiration Example](examples.md#message-expiration) which
shows how message expiry is configured and used with JMS.

View File

@ -1,106 +1,106 @@
# Message Grouping
Message groups are sets of messages that have the following
characteristics:
Message groups are sets of messages that have the following characteristics:
- Messages in a message group share the same group id, i.e. they have
same group identifier property (`JMSXGroupID` for JMS,
`_AMQ_GROUP_ID` for Apache ActiveMQ Artemis Core API).
- Messages in a message group share the same group id, i.e. they have same
group identifier property (`JMSXGroupID` for JMS, `_AMQ_GROUP_ID` for Apache
ActiveMQ Artemis Core API).
- Messages in a message group are always consumed by the same
consumer, even if there are many consumers on a queue. They pin all
messages with the same group id to the same consumer. If that
consumer closes another consumer is chosen and will receive all
messages with the same group id.
- Messages in a message group are always consumed by the same consumer, even if
there are many consumers on a queue. They pin all messages with the same
group id to the same consumer. If that consumer closes another consumer is
chosen and will receive all messages with the same group id.
Message groups are useful when you want all messages for a certain value
of the property to be processed serially by the same consumer.
Message groups are useful when you want all messages for a certain value of the
property to be processed serially by the same consumer.
An example might be orders for a certain stock. You may want orders for
any particular stock to be processed serially by the same consumer. To
do this you can create a pool of consumers (perhaps one for each stock,
but less will work too), then set the stock name as the value of the
_AMQ_GROUP_ID property.
An example might be orders for a certain stock. You may want orders for any
particular stock to be processed serially by the same consumer. To do this you
can create a pool of consumers (perhaps one for each stock, but less will work
too), then set the stock name as the value of the _AMQ_GROUP_ID property.
This will ensure that all messages for a particular stock will always be
processed by the same consumer.
> **Note**
> **Note:**
>
> Grouped messages can impact the concurrent processing of non-grouped
> messages due to the underlying FIFO semantics of a queue. For example,
> if there is a chunk of 100 grouped messages at the head of a queue
> followed by 1,000 non-grouped messages then all the grouped messages
> will need to be sent to the appropriate client (which is consuming
> those grouped messages serially) before any of the non-grouped
> messages can be consumed. The functional impact in this scenario is a
> temporary suspension of concurrent message processing while all the
> grouped messages are processed. This can be a performance bottleneck
> so keep it in mind when determining the size of your message groups,
> and consider whether or not you should isolate your grouped messages
> Grouped messages can impact the concurrent processing of non-grouped messages
> due to the underlying FIFO semantics of a queue. For example, if there is a
> chunk of 100 grouped messages at the head of a queue followed by 1,000
> non-grouped messages then all the grouped messages will need to be sent to
> the appropriate client (which is consuming those grouped messages serially)
> before any of the non-grouped messages can be consumed. The functional impact
> in this scenario is a temporary suspension of concurrent message processing
> while all the grouped messages are processed. This can be a performance
> bottleneck so keep it in mind when determining the size of your message
> groups, and consider whether or not you should isolate your grouped messages
> from your non-grouped messages.
## Using Core API
The property name used to identify the message group is `"_AMQ_GROUP_ID"`
(or the constant `MessageImpl.HDR_GROUP_ID`). Alternatively, you can set
`autogroup` to true on the `SessionFactory` which will pick a random
unique id.
The property name used to identify the message group is `"_AMQ_GROUP_ID"` (or
the constant `MessageImpl.HDR_GROUP_ID`). Alternatively, you can set
`autogroup` to true on the `SessionFactory` which will pick a random unique id.
## Using JMS
The property name used to identify the message group is `JMSXGroupID`.
// send 2 messages in the same group to ensure the same
// consumer will receive both
Message message = ...
message.setStringProperty("JMSXGroupID", "Group-0");
producer.send(message);
```java
// send 2 messages in the same group to ensure the same
// consumer will receive both
Message message = ...
message.setStringProperty("JMSXGroupID", "Group-0");
producer.send(message);
message = ...
message.setStringProperty("JMSXGroupID", "Group-0");
producer.send(message);
message = ...
message.setStringProperty("JMSXGroupID", "Group-0");
producer.send(message);
```
Alternatively, you can set `autogroup` to true on the
`ActiveMQConnectionFactory` which will pick a random unique id. This can
also be set in the JNDI context environment, e.g. `jndi.properties`.
Here's a simple example using the "ConnectionFactory" connection factory
which is available in the context by default
`ActiveMQConnectionFactory` which will pick a random unique id. This can also be
set in the JNDI context environment, e.g. `jndi.properties`. Here's a simple
example using the "ConnectionFactory" connection factory which is available in
the context by default
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.myConnectionFactory=tcp://localhost:61616?autoGroup=true
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.myConnectionFactory=tcp://localhost:61616?autoGroup=true
```
Alternatively you can set the group id via the connection factory. All
messages sent with producers created via this connection factory will
set the `JMSXGroupID` to the specified value on all messages sent. This
can also be set in the JNDI context environment, e.g. `jndi.properties`.
Here's a simple example using the "ConnectionFactory" connection factory
which is available in the context by default:
Alternatively you can set the group id via the connection factory. All messages
sent with producers created via this connection factory will set the
`JMSXGroupID` to the specified value on all messages sent. This can also be set
in the JNDI context environment, e.g. `jndi.properties`. Here's a simple
example using the "ConnectionFactory" connection factory which is available in
the context by default:
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.myConnectionFactory=tcp://localhost:61616?groupID=Group-0
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.myConnectionFactory=tcp://localhost:61616?groupID=Group-0
```
## Example
See the [examples](examples.md) chapter for an example which shows how message groups are configured and used with JMS and via a connection factory.
See the [Message Group Example](examples.md#message-group) which shows how
message groups are configured and used with JMS and via a connection factory.
## Clustered Grouping
Using message groups in a cluster is a bit more complex. This is because
messages with a particular group id can arrive on any node so each node
needs to know about which group id's are bound to which consumer on
which node. The consumer handling messages for a particular group id may
be on a different node of the cluster, so each node needs to know this
information so it can route the message correctly to the node which has
that consumer.
messages with a particular group id can arrive on any node so each node needs
to know about which group id's are bound to which consumer on which node. The
consumer handling messages for a particular group id may be on a different node
of the cluster, so each node needs to know this information so it can route the
message correctly to the node which has that consumer.
To solve this there is the notion of a grouping handler. Each node will
have its own grouping handler and when a message is sent with a group
id assigned, the handlers will decide between them which route the
message should take.
To solve this there is the notion of a grouping handler. Each node will have
its own grouping handler and when a message is sent with a group id assigned,
the handlers will decide between them which route the message should take.
Here is a sample config for each type of handler. This should be
configured in `broker.xml`.
Here is a sample config for each type of handler. This should be configured in
`broker.xml`.
```xml
<grouping-handler name="my-grouping-handler">
@ -116,71 +116,66 @@ configured in `broker.xml`.
</grouping-handler>
```
- `type` two types of handlers are supported - `LOCAL` and `REMOTE`.
Each cluster should choose 1 node to have a `LOCAL` grouping handler
and all the other nodes should have `REMOTE` handlers. It's the `LOCAL`
handler that actually makes the decision as to what route should be
used, all the other `REMOTE` handlers converse with this.
- `type` two types of handlers are supported - `LOCAL` and `REMOTE`. Each
cluster should choose 1 node to have a `LOCAL` grouping handler and all the
other nodes should have `REMOTE` handlers. It's the `LOCAL` handler that
actually makes the decision as to what route should be used, all the other
`REMOTE` handlers converse with this.
- `address` refers to a [cluster connection and the address
it uses](clusters.md#configuring-cluster-connections). Refer to the
clustering section on how to configure clusters.
- `address` refers to a [cluster connection and the address it
uses](clusters.md#configuring-cluster-connections). Refer to the clustering
section on how to configure clusters.
- `timeout` how long to wait for a decision to be made. An exception
will be thrown during the send if this timeout is reached, this
ensures that strict ordering is kept.
- `timeout` how long to wait for a decision to be made. An exception will be
thrown during the send if this timeout is reached, this ensures that strict
ordering is kept.
The decision as to where a message should be routed to is initially
proposed by the node that receives the message. The node will pick a
suitable route as per the normal clustered routing conditions, i.e.
round robin available queues, use a local queue first and choose a queue
that has a consumer. If the proposal is accepted by the grouping
handlers the node will route messages to this queue from that point on,
if rejected an alternative route will be offered and the node will again
route to that queue indefinitely. All other nodes will also route to the
queue chosen at proposal time. Once the message arrives at the queue
then normal single server message group semantics take over and the
The decision as to where a message should be routed to is initially proposed by
the node that receives the message. The node will pick a suitable route as per
the normal clustered routing conditions, i.e. round robin available queues,
use a local queue first and choose a queue that has a consumer. If the proposal
is accepted by the grouping handlers the node will route messages to this queue
from that point on, if rejected an alternative route will be offered and the
node will again route to that queue indefinitely. All other nodes will also
route to the queue chosen at proposal time. Once the message arrives at the
queue then normal single server message group semantics take over and the
message is pinned to a consumer on that queue.
You may have noticed that there is a single point of failure with the
single local handler. If this node crashes then no decisions will be
able to be made. Any messages sent will not be delivered and an
exception thrown. To avoid this happening Local Handlers can be
replicated on another backup node. Simple create your back up node and
configure it with the same Local handler.
You may have noticed that there is a single point of failure with the single
local handler. If this node crashes then no decisions will be able to be made.
Any messages sent will not be delivered and an exception thrown. To avoid
this happening Local Handlers can be replicated on another backup node. Simply
create your backup node and configure it with the same Local handler.
## Clustered Grouping Best Practices
Some best practices should be followed when using clustered grouping:
1. Make sure your consumers are distributed evenly across the different
nodes if possible. This is only an issue if you are creating and
closing consumers regularly. Since messages are always routed to the
same queue once pinned, removing a consumer from this queue may
leave it with no consumers meaning the queue will just keep
receiving the messages. Avoid closing consumers or make sure that
you always have plenty of consumers, i.e., if you have 3 nodes have
3 consumers.
1. Make sure your consumers are distributed evenly across the different nodes
if possible. This is only an issue if you are creating and closing
consumers regularly. Since messages are always routed to the same queue once
pinned, removing a consumer from this queue may leave it with no consumers
meaning the queue will just keep receiving the messages. Avoid closing
consumers or make sure that you always have plenty of consumers, i.e., if you
have 3 nodes have 3 consumers.
2. Use durable queues if possible. If queues are removed once a group
is bound to it, then it is possible that other nodes may still try
to route messages to it. This can be avoided by making sure that the
queue is deleted by the session that is sending the messages. This
means that when the next message is sent it is sent to the node
where the queue was deleted meaning a new proposal can successfully
take place. Alternatively you could just start using a different
group id.
2. Use durable queues if possible. If queues are removed once a group is bound
to it, then it is possible that other nodes may still try to route messages
to it. This can be avoided by making sure that the queue is deleted by the
session that is sending the messages. This means that when the next message is
sent it is sent to the node where the queue was deleted meaning a new proposal
can successfully take place. Alternatively you could just start using a
different group id.
3. Always make sure that the node that has the Local Grouping Handler
is replicated. This means that on failover grouping will still
occur.
3. Always make sure that the node that has the Local Grouping Handler is
replicated. This means that on failover grouping will still occur.
4. In case you are using group-timeouts, the remote node should have a
smaller group-timeout with at least half of the value on the main
coordinator. This is because this will determine how often the
last-time-use value should be updated with a round trip for a
request to the group between the nodes.
4. In case you are using group-timeouts, the remote node should have a smaller
group-timeout with at least half of the value on the main coordinator. This
is because this will determine how often the last-time-use value should be
updated with a round trip for a request to the group between the nodes.
## Clustered Grouping Example
See the [examples](examples.md) chapter for an example of how to configure message groups with an ActiveMQ Artemis Cluster.
See the [Clustered Grouping Example](examples.md#clustered-grouping) which
shows how to configure message groups with an ActiveMQ Artemis Cluster.

View File

@ -1,240 +1,240 @@
# Messaging Concepts
Apache ActiveMQ Artemis is an asynchronous messaging system, an example of [Message
Oriented
Middleware](https://en.wikipedia.org/wiki/Message-oriented_middleware) ,
we'll just call them messaging systems in the remainder of this book.
Apache ActiveMQ Artemis is an asynchronous messaging system, an example of
[Message Oriented
Middleware](https://en.wikipedia.org/wiki/Message-oriented_middleware) , we'll
just call them messaging systems in the remainder of this book.
We'll first present a brief overview of what kind of things messaging
systems do, where they're useful and the kind of concepts you'll hear
about in the messaging world.
We'll first present a brief overview of what kind of things messaging systems
do, where they're useful and the kind of concepts you'll hear about in the
messaging world.
If you're already familiar with what a messaging system is and what it's
capable of, then you can skip this chapter.
## General Concepts
Messaging systems allow you to loosely couple heterogeneous systems
together, whilst typically providing reliability, transactions and many
other features.
Messaging systems allow you to loosely couple heterogeneous systems together,
whilst typically providing reliability, transactions and many other features.
Unlike systems based on a [Remote Procedure
Call](https://en.wikipedia.org/wiki/Remote_procedure_call) (RPC) pattern,
messaging systems primarily use an asynchronous message passing pattern
with no tight relationship between requests and responses. Most
messaging systems also support a request-response mode but this is not a
primary feature of messaging systems.
messaging systems primarily use an asynchronous message passing pattern with no
tight relationship between requests and responses. Most messaging systems also
support a request-response mode but this is not a primary feature of messaging
systems.
Designing systems to be asynchronous from end-to-end allows you to
really take advantage of your hardware resources, minimizing the amount
of threads blocking on IO operations, and to use your network bandwidth
to its full capacity. With an RPC approach you have to wait for a
response for each request you make so are limited by the network round
trip time, or *latency* of your network. With an asynchronous system you
can pipeline flows of messages in different directions, so are limited
by the network *bandwidth* not the latency. This typically allows you to
create much higher performance applications.
Designing systems to be asynchronous from end-to-end allows you to really take
advantage of your hardware resources, minimizing the amount of threads blocking
on IO operations, and to use your network bandwidth to its full capacity. With
an RPC approach you have to wait for a response for each request you make so
are limited by the network round trip time, or *latency* of your network. With
an asynchronous system you can pipeline flows of messages in different
directions, so are limited by the network *bandwidth* not the latency. This
typically allows you to create much higher performance applications.
Messaging systems decouple the senders of messages from the consumers of
messages. The senders and consumers of messages are completely
independent and know nothing of each other. This allows you to create
flexible, loosely coupled systems.
messages. The senders and consumers of messages are completely independent and
know nothing of each other. This allows you to create flexible, loosely coupled
systems.
Often, large enterprises use a messaging system to implement a message
bus which loosely couples heterogeneous systems together. Message buses
often form the core of an [Enterprise Service
Bus](https://en.wikipedia.org/wiki/Enterprise_service_bus). (ESB). Using
a message bus to de-couple disparate systems can allow the system to
grow and adapt more easily. It also allows more flexibility to add new
systems or retire old ones since they don't have brittle dependencies on
each other.
Often, large enterprises use a messaging system to implement a message bus
which loosely couples heterogeneous systems together. Message buses often form
the core of an [Enterprise Service
Bus](https://en.wikipedia.org/wiki/Enterprise_service_bus). (ESB). Using a
message bus to de-couple disparate systems can allow the system to grow and
adapt more easily. It also allows more flexibility to add new systems or retire
old ones since they don't have brittle dependencies on each other.
## Messaging styles
Messaging systems normally support two main styles of asynchronous
messaging: [message queue](https://en.wikipedia.org/wiki/Message_queue)
messaging (also known as *point-to-point messaging*) and [publish
subscribe](https://en.wikipedia.org/wiki/Publish_subscribe) messaging.
We'll summarise them briefly here:
Messaging systems normally support two main styles of asynchronous messaging:
[message queue](https://en.wikipedia.org/wiki/Message_queue) messaging (also
known as *point-to-point messaging*) and [publish
subscribe](https://en.wikipedia.org/wiki/Publish_subscribe) messaging. We'll
summarise them briefly here:
### Point-to-Point
With this type of messaging you send a message to a queue. The message
is then typically persisted to provide a guarantee of delivery, then
some time later the messaging system delivers the message to a consumer.
The consumer then processes the message and when it is done, it
acknowledges the message. Once the message is acknowledged it disappears
from the queue and is not available to be delivered again. If the system
crashes before the messaging server receives an acknowledgement from the
consumer, then on recovery, the message will be available to be
delivered to a consumer again.
With this type of messaging you send a message to a queue. The message is then
typically persisted to provide a guarantee of delivery, then some time later
the messaging system delivers the message to a consumer. The consumer then
processes the message and when it is done, it acknowledges the message. Once
the message is acknowledged it disappears from the queue and is not available
to be delivered again. If the system crashes before the messaging server
receives an acknowledgement from the consumer, then on recovery, the message
will be available to be delivered to a consumer again.
With point-to-point messaging, there can be many consumers on the queue
but a particular message will only ever be consumed by a maximum of one
of them. Senders (also known as *producers*) to the queue are completely
decoupled from receivers (also known as *consumers*) of the queue - they
do not know of each other's existence.
With point-to-point messaging, there can be many consumers on the queue but a
particular message will only ever be consumed by a maximum of one of them.
Senders (also known as *producers*) to the queue are completely decoupled from
receivers (also known as *consumers*) of the queue - they do not know of each
other's existence.
A classic example of point to point messaging would be an order queue in
a company's book ordering system. Each order is represented as a message
which is sent to the order queue. Let's imagine there are many front end
ordering systems which send orders to the order queue. When a message
arrives on the queue it is persisted - this ensures that if the server
crashes the order is not lost. Let's also imagine there are many
consumers on the order queue - each representing an instance of an order
processing component - these can be on different physical machines but
consuming from the same queue. The messaging system delivers each
message to one and only one of the ordering processing components.
Different messages can be processed by different order processors, but a
single order is only processed by one order processor - this ensures
A classic example of point to point messaging would be an order queue in a
company's book ordering system. Each order is represented as a message which is
sent to the order queue. Let's imagine there are many front end ordering
systems which send orders to the order queue. When a message arrives on the
queue it is persisted - this ensures that if the server crashes the order is
not lost. Let's also imagine there are many consumers on the order queue - each
representing an instance of an order processing component - these can be on
different physical machines but consuming from the same queue. The messaging
system delivers each message to one and only one of the ordering processing
components. Different messages can be processed by different order processors,
but a single order is only processed by one order processor - this ensures
orders aren't processed twice.
As an order processor receives a message, it fulfills the order, sends
order information to the warehouse system and then updates the order
database with the order details. Once it's done that it acknowledges the
message to tell the server that the order has been processed and can be
forgotten about. Often the send to the warehouse system, update in
database and acknowledgement will be completed in a single transaction
to ensure [ACID](https://en.wikipedia.org/wiki/ACID) properties.
As an order processor receives a message, it fulfills the order, sends order
information to the warehouse system and then updates the order database with
the order details. Once it's done that it acknowledges the message to tell the
server that the order has been processed and can be forgotten about. Often the
send to the warehouse system, update in database and acknowledgement will be
completed in a single transaction to ensure
[ACID](https://en.wikipedia.org/wiki/ACID) properties.
### Publish-Subscribe
With publish-subscribe messaging many senders can send messages to an
entity on the server, often called a *topic* (e.g. in the JMS world).
With publish-subscribe messaging many senders can send messages to an entity on
the server, often called a *topic* (e.g. in the JMS world).
There can be many *subscriptions* on a topic, a subscription is just
another word for a consumer of a topic. Each subscription receives a
*copy* of *each* message sent to the topic. This differs from the
message queue pattern where each message is only consumed by a single
consumer.
There can be many *subscriptions* on a topic, a subscription is just another
word for a consumer of a topic. Each subscription receives a *copy* of *each*
message sent to the topic. This differs from the message queue pattern where
each message is only consumed by a single consumer.
Subscriptions can optionally be *durable* which means they retain a copy
of each message sent to the topic until the subscriber consumes them -
even if the server crashes or is restarted in between. Non-durable
subscriptions only last a maximum of the lifetime of the connection that
created them.
Subscriptions can optionally be *durable* which means they retain a copy of
each message sent to the topic until the subscriber consumes them - even if the
server crashes or is restarted in between. Non-durable subscriptions only last
a maximum of the lifetime of the connection that created them.
An example of publish-subscribe messaging would be a news feed. As news
articles are created by different editors around the world they are sent
to a news feed topic. There are many subscribers around the world who
are interested in receiving news items - each one creates a subscription
and the messaging system ensures that a copy of each news message is
delivered to each subscription.
articles are created by different editors around the world they are sent to a
news feed topic. There are many subscribers around the world who are interested
in receiving news items - each one creates a subscription and the messaging
system ensures that a copy of each news message is delivered to each
subscription.
## Delivery guarantees
A key feature of most messaging systems is *reliable messaging*. With
reliable messaging the server gives a guarantee that the message will be
delivered once and only once to each consumer of a queue or each durable
subscription of a topic, even in the event of system failure. This is
crucial for many businesses; e.g. you don't want your orders fulfilled
more than once or any of your orders to be lost.
A key feature of most messaging systems is *reliable messaging*. With reliable
messaging the server gives a guarantee that the message will be delivered once
and only once to each consumer of a queue or each durable subscription of a
topic, even in the event of system failure. This is crucial for many
businesses; e.g. you don't want your orders fulfilled more than once or any of
your orders to be lost.
In other cases you may not care about a once and only once delivery
guarantee and are happy to cope with duplicate deliveries or lost
messages - an example of this might be transient stock price updates -
which are quickly superseded by the next update on the same stock. The
messaging system allows you to configure which delivery guarantees you
require.
In other cases you may not care about a once and only once delivery guarantee
and are happy to cope with duplicate deliveries or lost messages - an example
of this might be transient stock price updates - which are quickly superseded
by the next update on the same stock. The messaging system allows you to
configure which delivery guarantees you require.
## Transactions
Messaging systems typically support the sending and acknowledgement of
multiple messages in a single local transaction. Apache ActiveMQ Artemis also supports
Messaging systems typically support the sending and acknowledgement of multiple
messages in a single local transaction. Apache ActiveMQ Artemis also supports
the sending and acknowledgement of message as part of a large global
transaction - using the Java mapping of XA: JTA.
## Durability
Messages are either durable or non durable. Durable messages will be
persisted in permanent storage and will survive server failure or
restart. Non durable messages will not survive server failure or
restart. Examples of durable messages might be orders or trades, where
they cannot be lost. An example of a non durable message might be a
stock price update which is transitory and doesn't need to survive a
restart.
Messages are either durable or non durable. Durable messages will be persisted
in permanent storage and will survive server failure or restart. Non durable
messages will not survive server failure or restart. Examples of durable
messages might be orders or trades, where they cannot be lost. An example of a
non durable message might be a stock price update which is transitory and
doesn't need to survive a restart.
## Messaging APIs and protocols
How do client applications interact with messaging systems in order to
send and consume messages?
How do client applications interact with messaging systems in order to send and
consume messages?
Several messaging systems provide their own proprietary APIs with which
the client communicates with the messaging system.
Several messaging systems provide their own proprietary APIs with which the
client communicates with the messaging system.
There are also some standard ways of operating with messaging systems
and some emerging standards in this space.
There are also some standard ways of operating with messaging systems and some
emerging standards in this space.
Let's take a brief look at these:
### Java Message Service (JMS)
[JMS](https://en.wikipedia.org/wiki/Java_Message_Service) is part of
Oracle's Java EE specification. It's a Java API that encapsulates both message
queue and publish-subscribe messaging patterns. JMS is a lowest common
denominator specification - i.e. it was created to encapsulate common
functionality of the already existing messaging systems that were
available at the time of its creation.
[JMS](https://en.wikipedia.org/wiki/Java_Message_Service) is part of Oracle's
Java EE specification. It's a Java API that encapsulates both message queue and
publish-subscribe messaging patterns. JMS is a lowest common denominator
specification - i.e. it was created to encapsulate common functionality of the
already existing messaging systems that were available at the time of its
creation.
JMS is a very popular API and is implemented by most messaging systems.
JMS is only available to clients running Java.
JMS is a very popular API and is implemented by most messaging systems. JMS is
only available to clients running Java.
JMS does not define a standard wire format - it only defines a
programmatic API so JMS clients and servers from different vendors
cannot directly interoperate since each will use the vendor's own
internal wire protocol.
JMS does not define a standard wire format - it only defines a programmatic API
so JMS clients and servers from different vendors cannot directly interoperate
since each will use the vendor's own internal wire protocol.
Apache ActiveMQ Artemis provides a fully compliant JMS 1.1 and JMS 2.0 API.
Apache ActiveMQ Artemis provides a fully compliant [JMS 1.1 and JMS 2.0 client
implementation](using-jms.md).
### System specific APIs
Many systems provide their own programmatic API for which to interact
with the messaging system. The advantage of this is that it allows the full set
of system functionality to be exposed to the client application. API's
like JMS are not normally rich enough to expose all the extra features
that most messaging systems provide.
Many systems provide their own programmatic API for which to interact with the
messaging system. The advantage of this is that it allows the full set of system
functionality to be exposed to the client application. API's like JMS are not
normally rich enough to expose all the extra features that most messaging
systems provide.
Apache ActiveMQ Artemis provides its own core client API for clients to use if they
wish to have access to functionality over and above that accessible via
Apache ActiveMQ Artemis provides its own core client API for clients to use if
they wish to have access to functionality over and above that accessible via
the JMS API.
Please see [Core](core.md) for using the Core API with Apache ActiveMQ Artemis.
### RESTful API
[REST](https://en.wikipedia.org/wiki/Representational_State_Transfer)
approaches to messaging are showing a lot of interest recently.
It seems plausible that API standards for cloud computing may converge
on a REST style set of interfaces and consequently a REST messaging
approach is a very strong contender for becoming the de-facto method for
messaging interoperability.
It seems plausible that API standards for cloud computing may converge on a
REST style set of interfaces and consequently a REST messaging approach is a
very strong contender for becoming the de-facto method for messaging
interoperability.
With a REST approach messaging resources are manipulated as resources
defined by a URI and typically using a simple set of operations on those
resources, e.g. PUT, POST, GET etc. REST approaches to messaging often
use HTTP as their underlying protocol.
With a REST approach messaging resources are manipulated as resources defined
by a URI and typically using a simple set of operations on those resources,
e.g. PUT, POST, GET etc. REST approaches to messaging often use HTTP as their
underlying protocol.
The advantage of a REST approach with HTTP is in its simplicity and the
fact the internet is already tuned to deal with HTTP optimally.
The advantage of a REST approach with HTTP is in its simplicity and the fact
the internet is already tuned to deal with HTTP optimally.
Please see [Rest Interface](rest.md) for using Apache ActiveMQ Artemis's RESTful interface.
Please see [Rest Interface](rest.md) for using Apache ActiveMQ Artemis's
RESTful interface.
### AMQP
[AMQP](https://en.wikipedia.org/wiki/AMQP) is a specification for
interoperable messaging. It also defines a wire format, so any AMQP
client can work with any messaging system that supports AMQP. AMQP
clients are available in many different programming languages.
[AMQP](https://en.wikipedia.org/wiki/AMQP) is a specification for interoperable
messaging. It also defines a wire format, so any AMQP client can work with any
messaging system that supports AMQP. AMQP clients are available in many
different programming languages.
Apache ActiveMQ Artemis implements the [AMQP
1.0](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=amqp)
specification. Any client that supports the 1.0 specification will be
able to interact with Apache ActiveMQ Artemis.
specification. Any client that supports the 1.0 specification will be able to
interact with Apache ActiveMQ Artemis.
Please see [AMQP](amqp.md) for using AMQP with Apache ActiveMQ Artemis.
### MQTT
[MQTT](https://mqtt.org/) is a lightweight connectivity protocol. It is designed
to run in environments where devices and networks are constrained. Out of the box
Apache ActiveMQ Artemis supports version MQTT 3.1.1. Any client supporting this
version of the protocol will work against Apache ActiveMQ Artemis.
[MQTT](https://mqtt.org/) is a lightweight connectivity protocol. It is
designed to run in environments where devices and networks are constrained. Out
of the box Apache ActiveMQ Artemis supports version MQTT 3.1.1. Any client
supporting this version of the protocol will work against Apache ActiveMQ
Artemis.
Please see [MQTT](mqtt.md) for using MQTT with Apache ActiveMQ Artemis.
### STOMP
@ -244,64 +244,67 @@ theoretically any Stomp client can work with any messaging system that
supports Stomp. Stomp clients are available in many different
programming languages.
Please see [Stomp](protocols-interoperability.md) for using STOMP with Apache ActiveMQ Artemis.
Please see [Stomp](stomp.md) for using STOMP with Apache ActiveMQ Artemis.
### OPENWIRE
### OpenWire
ActiveMQ 5.x defines its own wire Protocol "OPENWIRE". In order to support
ActiveMQ 5.x clients, Apache ActiveMQ Artemis supports OPENWIRE. Any ActiveMQ 5.12.x
or higher can be used with Apache ActiveMQ Artemis.
ActiveMQ 5.x defines its own wire protocol: OpenWire. In order to support
ActiveMQ 5.x clients, Apache ActiveMQ Artemis supports OpenWire. Any ActiveMQ
5.12.x or higher client can be used with Apache ActiveMQ Artemis.
Please see [OpenWire](openwire.md) for using OpenWire with Apache ActiveMQ
Artemis.
## High Availability
High Availability (HA) means that the system should remain operational
after failure of one or more of the servers. The degree of support for
HA varies between various messaging systems.
High Availability (HA) means that the system should remain operational after
failure of one or more of the servers. The degree of support for HA varies
between various messaging systems.
Apache ActiveMQ Artemis provides automatic failover where your sessions are
automatically reconnected to the backup server on event of live server
failure.
automatically reconnected to the backup server on event of live server failure.
For more information on HA, please see [High Availability and Failover](ha.md).
## Clusters
Many messaging systems allow you to create groups of messaging servers
called *clusters*. Clusters allow the load of sending and consuming
messages to be spread over many servers. This allows your system to
scale horizontally by adding new servers to the cluster.
Many messaging systems allow you to create groups of messaging servers called
*clusters*. Clusters allow the load of sending and consuming messages to be
spread over many servers. This allows your system to scale horizontally by
adding new servers to the cluster.
Degrees of support for clusters varies between messaging systems, with
some systems having fairly basic clusters with the cluster members being
hardly aware of each other.
Degrees of support for clusters varies between messaging systems, with some
systems having fairly basic clusters with the cluster members being hardly
aware of each other.
Apache ActiveMQ Artemis provides very configurable state-of-the-art clustering model
where messages can be intelligently load balanced between the servers in
the cluster, according to the number of consumers on each node, and
whether they are ready for messages.
Apache ActiveMQ Artemis provides very configurable state-of-the-art clustering
model where messages can be intelligently load balanced between the servers in
the cluster, according to the number of consumers on each node, and whether
they are ready for messages.
Apache ActiveMQ Artemis also has the ability to automatically redistribute messages
between nodes of a cluster to prevent starvation on any particular node.
Apache ActiveMQ Artemis also has the ability to automatically redistribute
messages between nodes of a cluster to prevent starvation on any particular
node.
For full details on clustering, please see [Clusters](clusters.md).
## Bridges and routing
Some messaging systems allow isolated clusters or single nodes to be
bridged together, typically over unreliable connections like a wide area
network (WAN), or the internet.
Some messaging systems allow isolated clusters or single nodes to be bridged
together, typically over unreliable connections like a wide area network (WAN),
or the internet.
A bridge normally consumes from a queue on one server and forwards
messages to another queue on a different server. Bridges cope with
unreliable connections, automatically reconnecting when the connections
becomes available again.
A bridge normally consumes from a queue on one server and forwards messages to
another queue on a different server. Bridges cope with unreliable connections,
automatically reconnecting when the connections becomes available again.
Apache ActiveMQ Artemis bridges can be configured with filter expressions to only
forward certain messages, and transformation can also be hooked in.
Apache ActiveMQ Artemis bridges can be configured with filter expressions to
only forward certain messages, and transformation can also be hooked in.
Apache ActiveMQ Artemis also allows routing between queues to be configured in server
side configuration. This allows complex routing networks to be set up
forwarding or copying messages from one destination to another, forming
a global network of interconnected brokers.
Apache ActiveMQ Artemis also allows routing between queues to be configured in
server side configuration. This allows complex routing networks to be set up
forwarding or copying messages from one destination to another, forming a
global network of interconnected brokers.
For more information please see [Core Bridges](core-bridges.md) and [Diverting and Splitting Message Flows](diverts.md).
For more information please see [Core Bridges](core-bridges.md) and [Diverting
and Splitting Message Flows](diverts.md).

137
docs/user-manual/en/mqtt.md Normal file
View File

@ -0,0 +1,137 @@
# MQTT
MQTT is a light weight, client to server, publish / subscribe messaging
protocol. MQTT has been specifically designed to reduce transport overhead
(and thus network traffic) and code footprint on client devices. For this
reason MQTT is ideally suited to constrained devices such as sensors and
actuators and is quickly becoming the defacto standard communication protocol
for IoT.
Apache ActiveMQ Artemis supports MQTT v3.1.1 (and also the older v3.1 code
message format). By default there are `acceptor` elements configured to accept
MQTT connections on ports `61616` and `1883`.
See the general [Protocols and Interoperability](protocols-interoperability.md)
chapter for details on configuring an `acceptor` for MQTT.
The best source of information on the MQTT protocol is in the [3.1.1
specification](https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html).
Refer to the MQTT examples for a look at some of this functionality in action.
## MQTT Quality of Service
MQTT offers 3 quality of service levels.
Each message (or topic subscription) can define a quality of service that is
associated with it. The quality of service level defined on a topic is the
maximum level a client is willing to accept. The quality of service level on a
message is the desired quality of service level for this message. The broker
will attempt to deliver messages to subscribers at the highest quality of
service level based on what is defined on the message and topic subscription.
Each quality of service level offers a level of guarantee by which a message is
sent or received:
- QoS 0: `AT MOST ONCE`
Guarantees that a particular message is only ever received by the subscriber
a maximum of one time. This does mean that the message may never arrive. The
sender and the receiver will attempt to deliver the message, but if something
fails and the message does not reach its destination (say due to a network
connection) the message may be lost. This QoS has the least network traffic
overhead and the least burden on the client and the broker and is often useful
for telemetry data where it doesn't matter if some of the data is lost.
- QoS 1: `AT LEAST ONCE`
Guarantees that a message will reach it's intended recipient one or more
times. The sender will continue to send the message until it receives an
acknowledgment from the recipient, confirming it has received the message. The
result of this QoS is that the recipient may receive the message multiple
times, and also incurs more network overhead than QoS 0 (due to acks). In
addition more burden is placed on the sender as it needs to store the message
and retry should it fail to receive an ack in a reasonable time.
- QoS 2: `EXACTLY ONCE`
The most costly of the QoS (in terms of network traffic and burden on sender
and receiver) this QoS will ensure that the message is received by a recipient
exactly one time. This ensures that the receiver never gets any duplicate
copies of the message and will eventually get it, but at the extra cost of
network overhead and complexity required on the sender and receiver.
## MQTT Retain Messages
MQTT has an interesting feature in which messages can be "retained" for a
particular address. This means that once a retain message has been sent to an
address, any new subscribers to that address will receive the last sent retain
message before any other messages; this happens even if the retained message
was sent before a client has connected or subscribed. An example of where this
feature might be useful is in environments such as IoT where devices need to
quickly get the current state of a system when they are on boarded into a
system.
## Will Messages
A will message can be sent when a client initially connects to a broker.
Clients are able to set a "will message" as part of the connect packet. If the
client abnormally disconnects, say due to a device or network failure the
broker will proceed to publish the will message to the specified address (as
defined also in the connect packet). Other subscribers to the will topic will
receive the will message and can react accordingly. This feature can be useful
in an IoT style scenario to detect errors across a potentially large scale
deployment of devices.
## Debug Logging
Detailed protocol logging (e.g. packets in/out) can be activated via the
following steps:
1. Open `<ARTEMIS_INSTANCE>/etc/logging.properties`
2. Add `org.apache.activemq.artemis.core.protocol.mqtt` to the `loggers` list.
3. Add this line to enable `TRACE` logging for this new logger:
`logger.org.apache.activemq.artemis.core.protocol.mqtt.level=TRACE`
4. Ensure the `level` for the `handler` you want to log the message doesn't
block the `TRACE` logging. For example, modify the `level` of the `CONSOLE`
`handler` like so: `handler.CONSOLE.level=TRACE`.
The MQTT specification doesn't dictate the format of the payloads which clients
publish. As far as the broker is concerned a payload is just just an array of
bytes. However, to facilitate logging the broker will encode the payloads as
UTF-8 strings and print them up to 256 characters. Payload logging is limited
to avoid filling the logs with potentially hundreds of megabytes of unhelpful
information.
## Wild card subscriptions
MQTT addresses are hierarchical much like a file system, and they use a special
character (i.e. `/` by default) to separate hierarchical levels. Subscribers
are able to subscribe to specific topics or to whole branches of a hierarchy.
To subscribe to branches of an address hierarchy a subscriber can use wild
cards. These wild cards (including the aforementioned separator) are
configurable. See the [Wildcard
Syntax](wildcard-syntax.md#customizing-the-syntax) chapter for details about
how to configure custom wild cards.
There are 2 types of wild cards in MQTT:
- **Multi level** (`#` by default)
Adding this wild card to an address would match all branches of the address
hierarchy under a specified node. For example: `/uk/#` Would match
`/uk/cities`, `/uk/cities/newcastle` and also `/uk/rivers/tyne`. Subscribing to
an address `#` would result in subscribing to all topics in the broker. This
can be useful, but should be done so with care since it has significant
performance implications.
- **Single level** (`+` by default)
Matches a single level in the address hierarchy. For example `/uk/+/stores`
would match `/uk/newcastle/stores` but not `/uk/cities/newcastle/stores`.

View File

@ -1,15 +1,18 @@
# Network Isolation (Split Brain)
It is possible that if a replicated live or backup server becomes isolated in a network that failover will occur and you will end up
with 2 live servers serving messages in a cluster, this we call split brain. There are different configurations you can choose
from that will help mitigate this problem
It is possible that if a replicated live or backup server becomes isolated in a
network that failover will occur and you will end up with 2 live servers
serving messages in a cluster, this we call split brain. There are different
configurations you can choose from that will help mitigate this problem.
## Quorum Voting
Quorum voting is used by both the live and the backup to decide what to do if a replication connection is disconnected.
Basically the server will request each live server in the cluster to vote as to whether it thinks the server it is replicating
to or from is still alive. You can also configure the time for which the quorum manager will wait for the quorum vote response.
The default time is 30 sec you can configure like so for master and also for the slave:
Quorum voting is used by both the live and the backup to decide what to do if a
replication connection is disconnected. Basically the server will request each
live server in the cluster to vote as to whether it thinks the server it is
replicating to or from is still alive. You can also configure the time for which
the quorum manager will wait for the quorum vote response. The default time is 30
seconds, which you can configure like so for the master and also for the slave:
```xml
<ha-policy>
@ -21,18 +24,23 @@ The default time is 30 sec you can configure like so for master and also for the
</ha-policy>
```
This being the case the minimum number of live/backup pairs needed is 3. If less than 3 pairs
are used then the only option is to use a Network Pinger which is explained later in this chapter or choose how you want each server to
react which the following details:
This being the case the minimum number of live/backup pairs needed is 3. If less
than 3 pairs are used then the only option is to use a Network Pinger which is
explained later in this chapter, or choose how you want each server to react, as
the following details:
### Backup Voting
By default if a replica loses its replication connection to the live broker it makes a decision as to whether to start or not
with a quorum vote. This of course requires that there be at least 3 pairs of live/backup nodes in the cluster. For a 3 node
cluster it will start if it gets 2 votes back saying that its live server is no longer available, for 4 nodes this would be
3 votes and so on. When a backup loses connection to the master it will keep voting for a quorum until it either receives a vote
allowing it to start or it detects that the master is still live. For the latter it will then restart as a backup. How many votes
and how long between each vote the backup should wait is configured like so:
By default if a replica loses its replication connection to the live broker it
makes a decision as to whether to start or not with a quorum vote. This of
course requires that there be at least 3 pairs of live/backup nodes in the
cluster. For a 3 node cluster it will start if it gets 2 votes back saying that
its live server is no longer available, for 4 nodes this would be 3 votes and
so on. When a backup loses connection to the master it will keep voting for a
quorum until it either receives a vote allowing it to start or it detects that
the master is still live. For the latter it will then restart as a backup. How
many votes and how long between each vote the backup should wait is configured
like so:
```xml
<ha-policy>
@ -45,8 +53,9 @@ and how long between each vote the backup should wait is configured like so:
</ha-policy>
```
It's also possible to statically set the quorum size that should be used for the case where the cluster size is known up front,
this is done on the Replica Policy like so:
It's also possible to statically set the quorum size that should be used for
the case where the cluster size is known up front, this is done on the Replica
Policy like so:
```xml
<ha-policy>
@ -58,16 +67,18 @@ this is done on the Replica Policy like so:
</ha-policy>
```
In this example the quorum size is set to 2 so if you were using a single pair and the backup lost connectivity it would
never start.
In this example the quorum size is set to 2 so if you were using a single pair
and the backup lost connectivity it would never start.
### Live Voting
By default, if the live server loses its replication connection then it will just carry on and wait for a backup to reconnect
and start replicating again. In the event of a possible split brain scenario this may mean that the live stays live even though
the backup has been activated. It is possible to configure the live server to vote for a quorum if this happens, in this way
if the live server doesn't receive a majority vote then it will shut down. This is done by setting the _vote-on-replication-failure_
to true.
By default, if the live server loses its replication connection then it will
just carry on and wait for a backup to reconnect and start replicating again.
In the event of a possible split brain scenario this may mean that the live
stays live even though the backup has been activated. It is possible to
configure the live server to vote for a quorum if this happens, in this way if
the live server doesn't receive a majority vote then it will shut down. This
is done by setting the _vote-on-replication-failure_ to true.
```xml
<ha-policy>
@ -79,22 +90,24 @@ to true.
</replication>
</ha-policy>
```
As in the backup policy it is also possible to statically configure the quorum size.
As in the backup policy it is also possible to statically configure the quorum
size.
## Pinging the network
You may configure one or more addresses in the broker.xml that are part of your network topology, which will be pinged throughout the life cycle of the server.
You may configure one or more addresses in the broker.xml that are part of your
network topology, which will be pinged throughout the life cycle of the server.
In such a case the server will stop itself until the network is back.
If you execute the create command passing a -ping argument, you will create a default xml that is ready to be used with network checks:
If you execute the create command passing a -ping argument, you will create a
default xml that is ready to be used with network checks:
```
./artemis create /myDir/myServer --ping 10.0.0.1
```
This XML part will be added to your broker.xml:
```xml
@ -126,10 +139,8 @@ Use this to use an HTTP server to validate the network
```
Once you lose connectivity towards 10.0.0.1 on the given example
, you will see this output at the server:
Once you lose connectivity towards 10.0.0.1 on the given example, you will
see this output at the server:
```
09:49:24,562 WARN [org.apache.activemq.artemis.core.server.NetworkHealthCheck] Ping Address /10.0.0.1 wasn't reacheable
@ -178,8 +189,9 @@ Once you re establish your network connections towards the configured check list
09:53:23,556 INFO [org.apache.activemq.artemis.core.server] AMQ221001: Apache ActiveMQ Artemis Message Broker version 1.6.0 [0.0.0.0, nodeID=04fd5dd8-b18c-11e6-9efe-6a0001921ad0]
```
# Warning
> Make sure you understand your network topology as this is meant to validate your network.
> Using IPs that could eventually disappear or be partially visible may defeat the purpose.
> You can use a list of multiple IPs. Any successful ping will make the server OK to continue running
> ## Warning
>
> Make sure you understand your network topology as this is meant to validate
> your network. Using IPs that could eventually disappear or be partially
> visible may defeat the purpose. You can use a list of multiple IPs. Any
> successful ping will make the server OK to continue running

View File

@ -0,0 +1,112 @@
# OpenWire
Apache ActiveMQ Artemis supports the
[OpenWire](http://activemq.apache.org/openwire.html) protocol so that an Apache
ActiveMQ 5.x JMS client can talk directly to an Apache ActiveMQ Artemis server.
By default there is an `acceptor` configured to accept OpenWire connections on
port `61616`.
See the general [Protocols and Interoperability](protocols-interoperability.md)
chapter for details on configuring an `acceptor` for OpenWire.
Refer to the OpenWire examples for a look at this functionality in action.
## Connection Monitoring
OpenWire has a few parameters to control how each connection is monitored, they
are:
- `maxInactivityDuration`
It specifies the time (milliseconds) after which the connection is closed by
the broker if no data was received. Default value is 30000.
- `maxInactivityDurationInitalDelay`
It specifies the maximum delay (milliseconds) before inactivity monitoring is
started on the connection. It can be useful if a broker is under load with many
connections being created concurrently. Default value is 10000.
- `useInactivityMonitor`
A value of false disables the InactivityMonitor completely and connections
will never time out. By default it is enabled. On the broker side you don't need
to set this. Instead you can set the connection-ttl to -1.
- `useKeepAlive`
Whether or not to send a KeepAliveInfo on an idle connection to prevent it
from timing out. Enabled by default. Disabling the keep alive will still make
connections time out if no data was received on the connection for the
specified amount of time.
Note at the beginning the InactivityMonitor negotiates the appropriate
`maxInactivityDuration` and `maxInactivityDurationInitalDelay`. The shortest
duration is taken for the connection.
For more details please see [ActiveMQ
InactivityMonitor](http://activemq.apache.org/activemq-inactivitymonitor.html).
## Disable/Enable Advisories
By default, advisory topics ([ActiveMQ
Advisory](http://activemq.apache.org/advisory-message.html)) are created in
order to send certain types of advisory messages to listening clients. As a
result, advisory addresses and queues will be displayed on the management
console, along with user deployed addresses and queues. This sometimes causes
confusion because the advisory objects are internally managed without the user
being aware of them. In addition, users may not want the advisory topics at all
(they cause extra resources and performance penalty) and it is convenient to
disable them altogether on the broker side.
The protocol provides two parameters to control advisory behaviors on the
broker side.
- `supportAdvisory`
Whether or not the broker supports advisory messages. If the value is true,
advisory addresses/queues will be created. If the value is false, no advisory
addresses/queues are created. Default value is `true`.
- `suppressInternalManagementObjects`
Whether or not the advisory addresses/queues, if any, will be registered to
management service (e.g. JMX registry). If set to true, no advisory
addresses/queues will be registered. If set to false, those are registered and
will be displayed on the management console. Default value is `true`.
The two parameters are configured on an OpenWire `acceptor`, e.g.:
```xml
<acceptor name="artemis">tcp://localhost:61616?protocols=OPENWIRE;supportAdvisory=true;suppressInternalManagementObjects=false</acceptor>
```
## Virtual Topic Consumer Destination Translation
For existing OpenWire consumers of virtual topic destinations it is possible to
configure a mapping function that will translate the virtual topic consumer
destination into a FQQN address. This address then represents the consumer as a
multicast binding to an address representing the virtual topic.
The configuration string property `virtualTopicConsumerWildcards` has two parts
separated by a `;`. The first is the 5.x style destination filter that
identifies the destination as belonging to a virtual topic. The second
identifies the number of `paths` that identify the consumer queue such that it
can be parsed from the destination. For example, the default 5.x virtual topic
with consumer prefix of `Consumer.*.`, would require a
`virtualTopicConsumerWildcards` filter of `Consumer.*.>;2`. As a url parameter
this transforms to `Consumer.*.%3E%3B2` when the url significant characters
`>;` are escaped with their hex code points. In an `acceptor` url it would be:
```xml
<acceptor name="artemis">tcp://localhost:61616?protocols=OPENWIRE;virtualTopicConsumerWildcards=Consumer.*.%3E%3B2</acceptor>
```
This will translate `Consumer.A.VirtualTopic.Orders` into a FQQN of
`VirtualTopic.Orders::Consumer.A` using the int component `2` of the
configuration to identify the consumer queue as the first two paths of the
destination. `virtualTopicConsumerWildcards` is multi valued using a `,`
separator.
Please see Virtual Topic Mapping example contained in the OpenWire
[examples](examples.md).

View File

@ -1,77 +1,70 @@
# Paging
Apache ActiveMQ Artemis transparently supports huge queues containing millions of
messages while the server is running with limited memory.
Apache ActiveMQ Artemis transparently supports huge queues containing millions
of messages while the server is running with limited memory.
In such a situation it's not possible to store all of the queues in
memory at any one time, so Apache ActiveMQ Artemis transparently *pages* messages into
and out of memory as they are needed, thus allowing massive queues with
a low memory footprint.
In such a situation it's not possible to store all of the queues in memory at
any one time, so Apache ActiveMQ Artemis transparently *pages* messages into
and out of memory as they are needed, thus allowing massive queues with a low
memory footprint.
Apache ActiveMQ Artemis will start paging messages to disk, when the size of all
messages in memory for an address exceeds a configured maximum size.
Apache ActiveMQ Artemis will start paging messages to disk, when the size of
all messages in memory for an address exceeds a configured maximum size.
The default configuration from Artemis has destinations with paging.
## Page Files
Messages are stored per address on the file system. Each address has an
individual folder where messages are stored in multiple files (page
files). Each file will contain messages up to a max configured size
(`page-size-bytes`). The system will navigate on the files as needed,
and it will remove the page file as soon as all the messages are
acknowledged up to that point.
individual folder where messages are stored in multiple files (page files).
Each file will contain messages up to a max configured size
(`page-size-bytes`). The system will navigate the files as needed, and it
will remove the page file as soon as all the messages are acknowledged up to
that point.
Browsers will read through the page-cursor system.
Consumers with selectors will also navigate through the page-files and will ignore messages that don't match the criteria.
Consumers with selectors will also navigate through the page-files and will
ignore messages that don't match the criteria.
> *Warning:*
> When you have a queue, and consumers filtering the queue with a very restrictive selector you may get into a situation where you won't be able to read more data from paging until you consume messages from the queue.
>
> Example: in one consumer you make a selector as 'color="red"'
> but you only have one red message, 1 million messages after the blue ones; you won't be able to consume red until you consume the blue ones.
> When you have a queue, and consumers filtering the queue with a very
> restrictive selector you may get into a situation where you won't be able to
> read more data from paging until you consume messages from the queue.
>
> This is different to browsing as we will "browse" the entire queue looking for messages and while we "depage" messages while feeding the queue.
> Example: in one consumer you make a selector as 'color="red"' but you only
> have one red message, 1 million messages after the blue ones; you won't be
> able to consume red until you consume the blue ones.
>
> This is different to browsing as we will "browse" the entire queue looking
> for messages and while we "depage" messages while feeding the queue.
### Configuration
You can configure the location of the paging folder
You can configure the location of the paging folder in `broker.xml`.
Global paging parameters are specified on the main configuration file
(`broker.xml`).
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq /schema/artemis-server.xsd">
...
<paging-directory>/somewhere/paging-directory</paging-directory>
...
Property Name Description Default
-------------------- --------------------------------------------------------------------------------------------------------------------------- -------------
`paging-directory` Where page files are stored. Apache ActiveMQ Artemis will create one folder for each address being paged under this configured location. data/paging
: Paging Configuration Parameters
- `paging-directory` Where page files are stored. Apache ActiveMQ Artemis will
create one folder for each address being paged under this configured
location. Default is `data/paging`.
## Paging Mode
As soon as messages delivered to an address exceed the configured size,
that address alone goes into page mode.
> **Note**
> **Note:**
>
> Paging is done individually per address. If you configure a
> max-size-bytes for an address, that means each matching address will
> have a maximum size that you specified. It DOES NOT mean that the
> total overall size of all matching addresses is limited to
> max-size-bytes.
> Paging is done individually per address. If you configure a max-size-bytes
> for an address, that means each matching address will have a maximum size
> that you specified. It DOES NOT mean that the total overall size of all
> matching addresses is limited to max-size-bytes.
### Configuration
Configuration is done at the address settings, done at the main
configuration file (`broker.xml`).
Configuration is done at the address settings in `broker.xml`.
```xml
<address-settings>
@ -85,117 +78,90 @@ configuration file (`broker.xml`).
This is the list of available parameters on the address settings.
<table summary="Server Configuration" border="1">
<colgroup>
<col/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>Property Name</th>
<th>Description</th>
<th>Default</th>
</tr>
</thead>
<tbody>
<tr>
<td>`max-size-bytes`</td>
<td>What's the max memory the address could have before entering on page mode.</td>
<td>-1 (disabled)</td>
</tr>
<tr>
<td>`page-size-bytes`</td>
<td>The size of each page file used on the paging system</td>
<td>10MiB (10 \* 1024 \* 1024 bytes)</td>
</tr>
<tr>
<td>`address-full-policy`</td>
<td>This must be set to PAGE for paging to enable. If the value is PAGE then further messages will be paged to disk. If the value is DROP then further messages will be silently dropped. If the value is FAIL then the messages will be dropped and the client message producers will receive an exception. If the value is BLOCK then client message producers will block when they try and send further messages.</td>
<td>PAGE</td>
</tr>
<tr>
<td>`page-max-cache-size`</td>
<td>The system will keep up to `page-max-cache-size` page files in memory to optimize IO during paging navigation.</td>
<td>5</td>
</tr>
</tbody>
</table>
Property Name|Description|Default
---|---|---
`max-size-bytes`|What's the max memory the address could have before entering on page mode.|-1 (disabled)
`page-size-bytes`|The size of each page file used on the paging system|10MiB (10 \* 1024 \* 1024 bytes)
`address-full-policy`|This must be set to `PAGE` for paging to enable. If the value is `PAGE` then further messages will be paged to disk. If the value is `DROP` then further messages will be silently dropped. If the value is `FAIL` then the messages will be dropped and the client message producers will receive an exception. If the value is `BLOCK` then client message producers will block when they try and send further messages.|`PAGE`
`page-max-cache-size`|The system will keep up to `page-max-cache-size` page files in memory to optimize IO during paging navigation.|5
## Global Max Size
Beyond the max-size-bytes on the address you can also set the global-max-size on the main configuration. If you set max-size-bytes = -1 on paging the global-max-size can still be used.
Beyond the `max-size-bytes` on the address you can also set the global-max-size
on the main configuration. If you set `max-size-bytes` = `-1` on paging the
`global-max-size` can still be used.
When you have more messages than what is configured global-max-size any new produced message will make that destination to go through its paging policy.
When you have more messages than what is configured `global-max-size` any new
produced message will make that destination to go through its paging policy.
global-max-size is calculated as half of the max memory available to the Java Virtual Machine, unless specified on the broker.xml configuration.
`global-max-size` is calculated as half of the max memory available to the Java
Virtual Machine, unless specified on the `broker.xml` configuration.
## Dropping messages
Instead of paging messages when the max size is reached, an address can
also be configured to just drop messages when the address is full.
Instead of paging messages when the max size is reached, an address can also be
configured to just drop messages when the address is full.
To do this just set the `address-full-policy` to `DROP` in the address
settings
To do this just set the `address-full-policy` to `DROP` in the address settings
## Dropping messages and throwing an exception to producers
Instead of paging messages when the max size is reached, an address can
also be configured to drop messages and also throw an exception on the
client-side when the address is full.
Instead of paging messages when the max size is reached, an address can also be
configured to drop messages and also throw an exception on the client-side when
the address is full.
To do this just set the `address-full-policy` to `FAIL` in the address
settings
To do this just set the `address-full-policy` to `FAIL` in the address settings
## Blocking producers
Instead of paging messages when the max size is reached, an address can
also be configured to block producers from sending further messages when
the address is full, thus preventing the memory being exhausted on the
server.
Instead of paging messages when the max size is reached, an address can also be
configured to block producers from sending further messages when the address is
full, thus preventing the memory being exhausted on the server.
When memory is freed up on the server, producers will automatically
unblock and be able to continue sending.
When memory is freed up on the server, producers will automatically unblock and
be able to continue sending.
To do this just set the `address-full-policy` to `BLOCK` in the address
settings
In the default configuration, all addresses are configured to block
producers after 10 MiB of data are in the address.
In the default configuration, all addresses are configured to block producers
after 10 MiB of data are in the address.
## Caution with Addresses with Multiple Multicast Queues
When a message is routed to an address that has multiple multicast queues bound to
it, e.g. a JMS subscription in a Topic, there is only 1 copy of the
message in memory. Each queue only deals with a reference to this.
Because of this the memory is only freed up once all queues referencing
the message have delivered it.
When a message is routed to an address that has multiple multicast queues bound
to it, e.g. a JMS subscription in a Topic, there is only 1 copy of the message
in memory. Each queue only deals with a reference to this. Because of this the
memory is only freed up once all queues referencing the message have delivered
it.
If you have a single lazy subscription, the entire address will suffer
IO performance hit as all the queues will have messages being sent
through an extra storage on the paging system.
If you have a single lazy subscription, the entire address will suffer IO
performance hit as all the queues will have messages being sent through an
extra storage on the paging system.
For example:
- An address has 10 multicast queues
- An address has 10 multicast queues
- One of the queues does not deliver its messages (maybe because of a
slow consumer).
- One of the queues does not deliver its messages (maybe because of a
slow consumer).
- Messages continually arrive at the address and paging is started.
- Messages continually arrive at the address and paging is started.
- The other 9 queues are empty even though messages have been sent.
- The other 9 queues are empty even though messages have been sent.
In this example all the other 9 queues will be consuming messages from
the page system. This may cause performance issues if this is an
undesirable state.
In this example all the other 9 queues will be consuming messages from the page
system. This may cause performance issues if this is an undesirable state.
## Max Disk Usage
The system will perform scans on the disk to determine if the disk is beyond a configured limit.
These are configured through `max-disk-usage` in percentage. Once that limit is reached any
message will be blocked (unless the protocol doesn't support flow control, in which case there will be an exception thrown and the connection for those clients dropped).
The system will perform scans on the disk to determine if the disk is beyond a
configured limit. These are configured through `max-disk-usage` in percentage.
Once that limit is reached any message will be blocked (unless the protocol
doesn't support flow control, in which case there will be an exception thrown
and the connection for those clients dropped).
## Example
See the [examples](examples.md) chapter for an example which shows how to use paging with Apache ActiveMQ Artemis.
See the [Paging Example](examples.md#paging) which shows how to use paging with
Apache ActiveMQ Artemis.

View File

@ -5,269 +5,251 @@ performance.
## Tuning persistence
- To get the best performance from Apache ActiveMQ Artemis whilst
using persistent messages it is recommended that the file store
is used. Apache ActiveMQ Artemis also supports JDBC persistence,
but there is a performance cost when persisting to a database vs
local disk.
- Put the message journal on its own physical volume. If the disk is
shared with other processes e.g. transaction co-ordinator, database
or other journals which are also reading and writing from it, then
this may greatly reduce performance since the disk head may be
skipping all over the place between the different files. One of the
advantages of an append only journal is that disk head movement is
minimised - this advantage is destroyed if the disk is shared. If
you're using paging or large messages make sure they're ideally put
on separate volumes too.
- To get the best performance from Apache ActiveMQ Artemis whilst using
persistent messages it is recommended that the file store is used. Apache
ActiveMQ Artemis also supports JDBC persistence, but there is a performance
cost when persisting to a database vs local disk.
- Minimum number of journal files. Set `journal-min-files` to a number
of files that would fit your average sustainable rate. This number
represents the lower threshold of the journal file pool.
- Put the message journal on its own physical volume. If the disk is shared
with other processes e.g. transaction co-ordinator, database or other
journals which are also reading and writing from it, then this may greatly
reduce performance since the disk head may be skipping all over the place
between the different files. One of the advantages of an append only journal is
that disk head movement is minimised - this advantage is destroyed if the disk
is shared. If you're using paging or large messages make sure they're ideally
put on separate volumes too.
- To set the upper threshold of the journal file pool. (`journal-min-files` being
the lower threshold). Set `journal-pool-files` to a number that represents
something near your maximum expected load. The journal will spill over
the pool should it need to, but will shrink back to the upper threshold,
when possible. This allows reuse of files, without taking up more disk
space than required. If you see new files being created on the journal
data directory too often, i.e. lots of data is being persisted,
you need to increase the journal-pool-files, this way the journal would
reuse more files instead of creating new data files, increasing performance
- Minimum number of journal files. Set `journal-min-files` to a number of files
that would fit your average sustainable rate. This number represents the
lower threshold of the journal file pool.
- Journal file size. The journal file size should be aligned to the
capacity of a cylinder on the disk. The default value 10MiB should
be enough on most systems.
- To set the upper threshold of the journal file pool. (`journal-min-files` being
the lower threshold). Set `journal-pool-files` to a number that represents
something near your maximum expected load. The journal will spill over the
pool should it need to, but will shrink back to the upper threshold, when
possible. This allows reuse of files, without taking up more disk space than
required. If you see new files being created on the journal data directory too
often, i.e. lots of data is being persisted, you need to increase the
journal-pool-files, this way the journal would reuse more files instead of
creating new data files, increasing performance
- Use AIO journal. If using Linux, try to keep your journal type as
AIO. AIO will scale better than Java NIO.
- Journal file size. The journal file size should be aligned to the capacity of
a cylinder on the disk. The default value 10MiB should be enough on most
systems.
- Tune `journal-buffer-timeout`. The timeout can be increased to
increase throughput at the expense of latency.
- Use `ASYNCIO` journal. If using Linux, try to keep your journal type as
`ASYNCIO`. `ASYNCIO` will scale better than Java NIO.
- If you're running AIO you might be able to get some better
performance by increasing `journal-max-io`. DO NOT change this
parameter if you are running NIO.
- If you are 100% sure you don't need power failure durability guarantees,
disable `journal-data-sync` and use `NIO` or `MAPPED` journal:
you'll benefit a huge performance boost on writes
with process failure durability guarantees.
- Tune `journal-buffer-timeout`. The timeout can be increased to increase
throughput at the expense of latency.
- If you're running `ASYNCIO` you might be able to get some better performance by
increasing `journal-max-io`. DO NOT change this parameter if you are running
NIO.
- If you are 100% sure you don't need power failure durability guarantees,
disable `journal-data-sync` and use `NIO` or `MAPPED` journal: you'll benefit
a huge performance boost on writes with process failure durability guarantees.
## Tuning JMS
There are a few areas where some tweaks can be done if you are using the
JMS API
There are a few areas where some tweaks can be done if you are using the JMS
API
- Disable message id. Use the `setDisableMessageID()` method on the
`MessageProducer` class to disable message ids if you don't need
them. This decreases the size of the message and also avoids the
overhead of creating a unique ID.
- Disable message id. Use the `setDisableMessageID()` method on the
`MessageProducer` class to disable message ids if you don't need them. This
decreases the size of the message and also avoids the overhead of creating a
unique ID.
- Disable message timestamp. Use the `setDisableMessageTimeStamp()`
method on the `MessageProducer` class to disable message timestamps
if you don't need them.
- Disable message timestamp. Use the `setDisableMessageTimeStamp()` method on
the `MessageProducer` class to disable message timestamps if you don't need
them.
- Avoid `ObjectMessage`. `ObjectMessage` is convenient but it comes at
a cost. The body of a `ObjectMessage` uses Java serialization to
serialize it to bytes. The Java serialized form of even small
objects is very verbose so takes up a lot of space on the wire, also
Java serialization is slow compared to custom marshalling
techniques. Only use `ObjectMessage` if you really can't use one of
the other message types, i.e. if you really don't know the type of
the payload until run-time.
- Avoid `ObjectMessage`. `ObjectMessage` is convenient but it comes at a cost.
The body of a `ObjectMessage` uses Java serialization to serialize it to
bytes. The Java serialized form of even small objects is very verbose so takes
up a lot of space on the wire, also Java serialization is slow compared to
custom marshalling techniques. Only use `ObjectMessage` if you really can't use
one of the other message types, i.e. if you really don't know the type of the
payload until run-time.
- Avoid `AUTO_ACKNOWLEDGE`. `AUTO_ACKNOWLEDGE` mode requires an
acknowledgement to be sent from the server for each message received
on the client, this means more traffic on the network. If you can,
use `DUPS_OK_ACKNOWLEDGE` or use `CLIENT_ACKNOWLEDGE` or a
transacted session and batch up many acknowledgements with one
acknowledge/commit.
- Avoid `AUTO_ACKNOWLEDGE`. `AUTO_ACKNOWLEDGE` mode requires an acknowledgement
to be sent from the server for each message received on the client, this
means more traffic on the network. If you can, use `DUPS_OK_ACKNOWLEDGE` or use
`CLIENT_ACKNOWLEDGE` or a transacted session and batch up many acknowledgements
with one acknowledge/commit.
- Avoid durable messages. By default JMS messages are durable. If you
don't really need durable messages then set them to be non-durable.
Durable messages incur a lot more overhead in persisting them to
storage.
- Avoid durable messages. By default JMS messages are durable. If you don't
really need durable messages then set them to be non-durable. Durable
messages incur a lot more overhead in persisting them to storage.
- Batch many sends or acknowledgements in a single transaction.
Apache ActiveMQ Artemis will only require a network round trip on the commit, not
on every send or acknowledgement.
- Batch many sends or acknowledgements in a single transaction. Apache
ActiveMQ Artemis will only require a network round trip on the commit, not on
every send or acknowledgement.
## Other Tunings
There are various other places in Apache ActiveMQ Artemis where we can perform some
tuning:
There are various other places in Apache ActiveMQ Artemis where we can perform
some tuning:
- Use Asynchronous Send Acknowledgements. If you need to send durable
messages non transactionally and you need a guarantee that they have
reached the server by the time the call to send() returns, don't set
durable messages to be sent blocking, instead use asynchronous send
acknowledgements to get your acknowledgements of send back in a
separate stream, see [Guarantees of sends and commits](send-guarantees.md)
for more information on this.
- Use Asynchronous Send Acknowledgements. If you need to send durable messages
non transactionally and you need a guarantee that they have reached the
server by the time the call to send() returns, don't set durable messages to be
sent blocking, instead use asynchronous send acknowledgements to get your
acknowledgements of send back in a separate stream, see [Guarantees of sends
and commits](send-guarantees.md) for more information on this.
- Use pre-acknowledge mode. With pre-acknowledge mode, messages are
acknowledged `before` they are sent to the client. This reduces the
amount of acknowledgement traffic on the wire. For more information
on this, see [Extra Acknowledge Modes](pre-acknowledge.md).
- Use pre-acknowledge mode. With pre-acknowledge mode, messages are
acknowledged `before` they are sent to the client. This reduces the amount of
acknowledgement traffic on the wire. For more information on this, see [Extra
Acknowledge Modes](pre-acknowledge.md).
- Disable security. You may get a small performance boost by disabling
security by setting the `security-enabled` parameter to `false` in
`broker.xml`.
- Disable security. You may get a small performance boost by disabling security
by setting the `security-enabled` parameter to `false` in `broker.xml`.
- Disable persistence. If you don't need message persistence, turn it
off altogether by setting `persistence-enabled` to false in
`broker.xml`.
- Disable persistence. If you don't need message persistence, turn it off
altogether by setting `persistence-enabled` to false in `broker.xml`.
- Sync transactions lazily. Setting `journal-sync-transactional` to
`false` in `broker.xml` can give you better
transactional persistent performance at the expense of some
possibility of loss of transactions on failure. See [Guarantees of sends and commits](send-guarantees.md)
for more information.
- Sync transactions lazily. Setting `journal-sync-transactional` to `false` in
`broker.xml` can give you better transactional persistent performance at the
expense of some possibility of loss of transactions on failure. See
[Guarantees of sends and commits](send-guarantees.md) for more information.
- Sync non transactional lazily. Setting
`journal-sync-non-transactional` to `false` in
`broker.xml` can give you better non-transactional
persistent performance at the expense of some possibility of loss of
durable messages on failure. See [Guarantees of sends and commits](send-guarantees.md)
for more information.
- Sync non transactional lazily. Setting `journal-sync-non-transactional` to
`false` in `broker.xml` can give you better non-transactional persistent
performance at the expense of some possibility of loss of durable messages on
failure. See [Guarantees of sends and commits](send-guarantees.md) for more
information.
- Send messages non blocking. Setting `block-on-durable-send` and
`block-on-non-durable-send` to `false` in the jms config (if
you're using JMS and JNDI) or directly on the ServerLocator. This
means you don't have to wait a whole network round trip for every
message sent. See [Guarantees of sends and commits](send-guarantees.md)
for more information.
- Send messages non blocking. Setting `block-on-durable-send` and
`block-on-non-durable-send` to `false` in the jms config (if you're using JMS
and JNDI) or directly on the ServerLocator. This means you don't have to wait a
whole network round trip for every message sent. See [Guarantees of sends and
commits](send-guarantees.md) for more information.
- If you have very fast consumers, you can increase
consumer-window-size. This effectively disables consumer flow
control.
- If you have very fast consumers, you can increase consumer-window-size. This
effectively disables consumer flow control.
- Use the core API not JMS. Using the JMS API you will have slightly
lower performance than using the core API, since all JMS operations
need to be translated into core operations before the server can
handle them. If using the core API try to use methods that take
`SimpleString` as much as possible. `SimpleString`, unlike
java.lang.String does not require copying before it is written to
the wire, so if you re-use `SimpleString` instances between calls
then you can avoid some unnecessary copying.
- If using frameworks like Spring, configure destinations permanently broker side
and enable `destinationCache` on the client side.
See the [Setting The Destination Cache](using-jms.md)
for more information on this.
- Use the core API not JMS. Using the JMS API you will have slightly lower
performance than using the core API, since all JMS operations need to be
translated into core operations before the server can handle them. If using the
core API try to use methods that take `SimpleString` as much as possible.
`SimpleString`, unlike java.lang.String does not require copying before it is
written to the wire, so if you re-use `SimpleString` instances between calls
then you can avoid some unnecessary copying.
- If using frameworks like Spring, configure destinations permanently broker
side and enable `destinationCache` on the client side. See the [Setting The
Destination Cache](using-jms.md) for more information on this.
## Tuning Transport Settings
- TCP buffer sizes. If you have a fast network and fast machines you
may get a performance boost by increasing the TCP send and receive
buffer sizes. See the [Configuring the Transport](configuring-transports.md)
for more information on this.
- TCP buffer sizes. If you have a fast network and fast machines you may get a
performance boost by increasing the TCP send and receive buffer sizes. See
the [Configuring the Transport](configuring-transports.md) for more information
on this.
> **Note**
>
> Note that some operating systems like later versions of Linux
> include TCP auto-tuning and setting TCP buffer sizes manually can
> prevent auto-tune from working and actually give you worse
> performance!
> **Note:**
>
> Note that some operating systems like later versions of Linux include TCP
> auto-tuning and setting TCP buffer sizes manually can prevent auto-tune
> from working and actually give you worse performance!
- Increase limit on file handles on the server. If you expect a lot of
concurrent connections on your servers, or if clients are rapidly
opening and closing connections, you should make sure the user
running the server has permission to create sufficient file handles.
- Increase limit on file handles on the server. If you expect a lot of
concurrent connections on your servers, or if clients are rapidly opening and
closing connections, you should make sure the user running the server has
permission to create sufficient file handles.
This varies from operating system to operating system. On Linux
systems you can increase the number of allowable open file handles
in the file `/etc/security/limits.conf` e.g. add the lines
This varies from operating system to operating system. On Linux systems you
can increase the number of allowable open file handles in the file
`/etc/security/limits.conf` e.g. add the lines
serveruser soft nofile 20000
serveruser hard nofile 20000
```
serveruser soft nofile 20000
serveruser hard nofile 20000
```
This would allow up to 20000 file handles to be open by the user
`serveruser`.
This would allow up to 20000 file handles to be open by the user
`serveruser`.
- Use `batch-delay` and set `direct-deliver` to false for the best
throughput for very small messages. Apache ActiveMQ Artemis comes with a
preconfigured connector/acceptor pair (`netty-throughput`) in
`broker.xml` and JMS connection factory
(`ThroughputConnectionFactory`) in `activemq-jms.xml` which can be
used to give the very best throughput, especially for small
messages. See the [Configuring the Transport](configuring-transports.md)
for more information on this.
- Use `batch-delay` and set `direct-deliver` to false for the best throughput
for very small messages. Apache ActiveMQ Artemis comes with a preconfigured
connector/acceptor pair (`netty-throughput`) in `broker.xml` and JMS connection
factory (`ThroughputConnectionFactory`) in `activemq-jms.xml` which can be used
to give the very best throughput, especially for small messages. See the
[Configuring the Transport](configuring-transports.md) for more information on
this.
## Tuning the VM
We highly recommend you use the latest Java JVM for the best
performance. We test internally using the Sun JVM, so some of these
tunings won't apply to JDKs from other providers (e.g. IBM or JRockit)
We highly recommend you use the latest Java JVM for the best performance. We
test internally using the Sun JVM, so some of these tunings won't apply to JDKs
from other providers (e.g. IBM or JRockit)
- Garbage collection. For smooth server operation we recommend using a
parallel garbage collection algorithm, e.g. using the JVM argument
`-XX:+UseParallelOldGC` on Sun JDKs.
- Garbage collection. For smooth server operation we recommend using a parallel
garbage collection algorithm, e.g. using the JVM argument
`-XX:+UseParallelOldGC` on Sun JDKs.
- Memory settings. Give as much memory as you can to the server.
Apache ActiveMQ Artemis can run in low memory by using paging (described in [Paging](paging.md)) but
if it can run with all queues in RAM this will improve performance.
The amount of memory you require will depend on the size and number
of your queues and the size and number of your messages. Use the JVM
arguments `-Xms` and `-Xmx` to set server available RAM. We
recommend setting them to the same high value.
When under periods of high load, it is likely that Artemis will be generating
and destroying lots of objects. This can result in a build up of stale objects.
To reduce the chance of running out of memory and causing a full GC
(which may introduce pauses and unintentional behaviour), it is recommended that the
max heap size (`-Xmx`) for the JVM is set at least to 5 x the `global-max-size` of the broker.
As an example, in a situation where the broker is under high load and running
with a `global-max-size` of 1GB, it is recommended that the max heap size is set to 5GB.
- Memory settings. Give as much memory as you can to the server. Apache
ActiveMQ Artemis can run in low memory by using paging (described in
[Paging](paging.md)) but if it can run with all queues in RAM this will improve
performance. The amount of memory you require will depend on the size and
number of your queues and the size and number of your messages. Use the JVM
arguments `-Xms` and `-Xmx` to set server available RAM. We recommend setting
them to the same high value.
When under periods of high load, it is likely that Artemis will be generating
and destroying lots of objects. This can result in a build up of stale objects.
To reduce the chance of running out of memory and causing a full GC (which may
introduce pauses and unintentional behaviour), it is recommended that the max
heap size (`-Xmx`) for the JVM is set at least to 5 x the `global-max-size` of
the broker. As an example, in a situation where the broker is under high load
and running with a `global-max-size` of 1GB, it is recommended that the max heap
size is set to 5GB.
- Aggressive options. Different JVMs provide different sets of JVM
tuning parameters, for the Sun Hotspot JVM the full list of options
is available
[here](http://www.oracle.com/technetwork/java/javase/tech/vmoptions-jsp-140102.html).
We recommend at least using `-XX:+AggressiveOpts`.
You may get some mileage with the other tuning parameters depending
on your OS platform and application usage patterns.
- Aggressive options. Different JVMs provide different sets of JVM tuning
parameters, for the Sun Hotspot JVM the full list of options is available
[here](http://www.oracle.com/technetwork/java/javase/tech/vmoptions-jsp-140102.html).
We recommend at least using `-XX:+AggressiveOpts`. You may get some mileage
with the other tuning parameters depending on your OS platform and application
usage patterns.
## Avoiding Anti-Patterns
- Re-use connections / sessions / consumers / producers. Probably the
most common messaging anti-pattern we see is users who create a new
connection/session/producer for every message they send or every
message they consume. This is a poor use of resources. These objects
take time to create and may involve several network round trips.
Always re-use them.
- Re-use connections / sessions / consumers / producers. Probably the most
common messaging anti-pattern we see is users who create a new
connection/session/producer for every message they send or every message they
consume. This is a poor use of resources. These objects take time to create and
may involve several network round trips. Always re-use them.
> **Note**
>
> Some popular libraries such as the Spring JMS Template are known
> to use these anti-patterns. If you're using Spring JMS Template
> and you're getting poor performance you know why. Don't blame
> Apache ActiveMQ Artemis! The Spring JMS Template can only safely be used in an
> app server which caches JMS sessions (e.g. using JCA), and only
> then for sending messages. It cannot safely be used for
> synchronously consuming messages, even in an app server.
> **Note:**
>
> Some popular libraries such as the Spring JMS Template are known to use
> these anti-patterns. If you're using Spring JMS Template and you're getting
> poor performance you know why. Don't blame Apache ActiveMQ Artemis! The
> Spring JMS Template can only safely be used in an app server which caches
> JMS sessions (e.g. using JCA), and only then for sending messages. It
> cannot safely be used for synchronously consuming messages, even in an
> app server.
- Avoid fat messages. Verbose formats such as XML take up a lot of
space on the wire and performance will suffer as a result. Avoid XML
in message bodies if you can.
- Avoid fat messages. Verbose formats such as XML take up a lot of space on the
wire and performance will suffer as a result. Avoid XML in message bodies if
you can.
- Don't create temporary queues for each request. This common
anti-pattern involves the temporary queue request-response pattern.
With the temporary queue request-response pattern a message is sent
to a target and a reply-to header is set with the address of a local
temporary queue. When the recipient receives the message they
process it then send back a response to the address specified in the
reply-to. A common mistake made with this pattern is to create a new
temporary queue on each message sent. This will drastically reduce
performance. Instead the temporary queue should be re-used for many
requests.
- Don't create temporary queues for each request. This common anti-pattern
involves the temporary queue request-response pattern. With the temporary
queue request-response pattern a message is sent to a target and a reply-to
header is set with the address of a local temporary queue. When the recipient
receives the message they process it then send back a response to the address
specified in the reply-to. A common mistake made with this pattern is to create
a new temporary queue on each message sent. This will drastically reduce
performance. Instead the temporary queue should be re-used for many requests.
- Don't use Message-Driven Beans for the sake of it. As soon as you
start using MDBs you are greatly increasing the codepath for each
message received compared to a straightforward message consumer,
since a lot of extra application server code is executed. Ask
yourself do you really need MDBs? Can you accomplish the same task
using just a normal message consumer?
- Don't use Message-Driven Beans for the sake of it. As soon as you start using
MDBs you are greatly increasing the codepath for each message received
compared to a straightforward message consumer, since a lot of extra
application server code is executed. Ask yourself do you really need MDBs? Can
you accomplish the same task using just a normal message consumer?
## Troubleshooting
@ -275,17 +257,30 @@ tunings won't apply to JDKs from other providers (e.g. IBM or JRockit)
In certain situations UDP used on discovery may not work. Typical situations are:
1. The nodes are behind a firewall. If your nodes are on different machines then it is possible that the firewall is blocking the multicasts. You can test this by disabling the firewall for each node or adding the appropriate rules.
2. You are using a home network or are behind a gateway. Typically home networks will redirect any UDP traffic to the Internet Service Provider which is then either dropped by the ISP or just lost. To fix this you will need to add a route to the firewall/gateway that will redirect any multicast traffic back on to the local network instead.
3. All the nodes are in one machine. If this is the case then it is a similar problem to point 2 and the same solution should fix it. Alternatively you could add a multicast route to the loopback interface. On linux the command would be:
```sh
# you should run this as root
route add -net 224.0.0.0 netmask 240.0.0.0 dev lo
```
This will redirect any traffic directed to the 224.0.0.0 to the loopback interface. This will also work if you have no network at all.
1. The nodes are behind a firewall. If your nodes are on different machines
then it is possible that the firewall is blocking the multicasts. You can
test this by disabling the firewall for each node or adding the appropriate
rules.
2. You are using a home network or are behind a gateway. Typically home
networks will redirect any UDP traffic to the Internet Service Provider
which is then either dropped by the ISP or just lost. To fix this you will need
to add a route to the firewall/gateway that will redirect any multicast traffic
back on to the local network instead.
3. All the nodes are in one machine. If this is the case then it is a similar
problem to point 2 and the same solution should fix it. Alternatively you
could add a multicast route to the loopback interface. On linux the command
would be:
* on Mac OS X, the command is slightly different:
```sh
sudo route add 224.0.0.0 127.0.0.1 -netmask 240.0.0.0
```
```sh
# you should run this as root
route add -net 224.0.0.0 netmask 240.0.0.0 dev lo
```
This will redirect any traffic directed to the 224.0.0.0 to the loopback
interface. This will also work if you have no network at all. On Mac OS X, the
command is slightly different:
```sh
sudo route add 224.0.0.0 127.0.0.1 -netmask 240.0.0.0
```

View File

@ -59,7 +59,7 @@ completion when AIO informs us that the data has been persisted.
Using AIO will typically provide even better performance than using Java NIO.
The AIO journal is only available when running Linux kernel 2.6 or
This journal option is only available when running Linux kernel 2.6 or
later and after having installed libaio (if it's not already
installed). For instructions on how to install libaio please see the Installing AIO section.
@ -69,53 +69,53 @@ systems: ext2, ext3, ext4, jfs, xfs and NFSV4.
For more information on libaio please see [lib AIO](libaio.md).
libaio is part of the kernel project.
### [Memory mapped](https://en.wikipedia.org/wiki/Memory-mapped_file)
The third implementation uses a file-backed [READ_WRITE](https://docs.oracle.com/javase/8/docs/api/java/nio/channels/FileChannel.MapMode.html#READ_WRITE)
memory mapping against the OS page cache to interface with the file system.
This provides extremely good performance (especially under strict process-failure durability requirements),
almost zero copy (actually *is* the kernel page cache) and zero garbage (from the Java HEAP perspective) operations and runs
on any platform where there's a Java 4+ runtime.
Under power failure durability requirements it will perform at least on par with the NIO journal, with the only
exception of Linux OS with kernel version 2.6 or earlier, in which the [*msync*](https://docs.oracle.com/javase/8/docs/api/java/nio/MappedByteBuffer.html#force%28%29) implementation necessary to ensure
durable writes was different (and slower) from the [*fsync*](https://docs.oracle.com/javase/8/docs/api/java/nio/channels/FileChannel.html#force%28boolean%29) used in the case of the NIO journal.
It benefits from the configuration of OS [huge pages](https://en.wikipedia.org/wiki/Page_%28computer_memory%29),
in particular when a large number of journal files is used and they are sized as a multiple of the OS page size in bytes.
in particular when a large number of journal files is used and they are sized as a multiple of the OS page size in bytes.
### Standard Files
The standard Apache ActiveMQ Artemis core server uses two instances of the journal:
- Bindings journal.
- Bindings journal.
This journal is used to store bindings related data. That includes
the set of queues that are deployed on the server and their
attributes. It also stores data such as id sequence counters.
This journal is used to store bindings related data. That includes
the set of queues that are deployed on the server and their
attributes. It also stores data such as id sequence counters.
The bindings journal is always a NIO journal as it is typically low
throughput compared to the message journal.
The bindings journal is always a NIO journal as it is typically low
throughput compared to the message journal.
The files on this journal are prefixed as `activemq-bindings`. Each
file has a `bindings` extension. File size is `1048576`, and it is
located at the bindings folder.
The files on this journal are prefixed as `activemq-bindings`. Each
file has a `bindings` extension. File size is `1048576`, and it is
located at the bindings folder.
- Message journal.
- Message journal.
This journal instance stores all message related data, including the
messages themselves and also duplicate-id caches.
This journal instance stores all message related data, including the
messages themselves and also duplicate-id caches.
By default Apache ActiveMQ Artemis will try and use an AIO journal. If AIO is not
available, e.g. the platform is not Linux with the correct kernel
version or AIO has not been installed then it will automatically
fall back to using Java NIO which is available on any Java platform.
By default Apache ActiveMQ Artemis will try and use an AIO journal. If AIO is not
available, e.g. the platform is not Linux with the correct kernel
version or AIO has not been installed then it will automatically
fall back to using Java NIO which is available on any Java platform.
The files on this journal are prefixed as `activemq-data`. Each file
has a `amq` extension. File size is by the default `10485760`
(configurable), and it is located at the journal folder.
The files on this journal are prefixed as `activemq-data`. Each file
has a `amq` extension. File size is by the default `10485760`
(configurable), and it is located at the journal folder.
For large messages, Apache ActiveMQ Artemis persists them outside the message journal.
This is discussed in [Large Messages](large-messages.md).
@ -132,17 +132,17 @@ the broker for Zero Persistence section.
The bindings journal is configured using the following attributes in
`broker.xml`
- `bindings-directory`
- `bindings-directory`
This is the directory in which the bindings journal lives. The
default value is `data/bindings`.
This is the directory in which the bindings journal lives. The
default value is `data/bindings`.
- `create-bindings-dir`
- `create-bindings-dir`
If this is set to `true` then the bindings directory will be
automatically created at the location specified in
`bindings-directory` if it does not already exist. The default value
is `true`
If this is set to `true` then the bindings directory will be
automatically created at the location specified in
`bindings-directory` if it does not already exist. The default value
is `true`
#### Configuring the jms journal
@ -153,159 +153,158 @@ The jms config shares its configuration with the bindings journal.
The message journal is configured using the following attributes in
`broker.xml`
- `journal-directory`
- `journal-directory`
This is the directory in which the message journal lives. The
default value is `data/journal`.
This is the directory in which the message journal lives. The
default value is `data/journal`.
For the best performance, we recommend the journal is located on its
own physical volume in order to minimise disk head movement. If the
journal is on a volume which is shared with other processes which
might be writing other files (e.g. bindings journal, database, or
transaction coordinator) then the disk head may well be moving
rapidly between these files as it writes them, thus drastically
reducing performance.
For the best performance, we recommend the journal is located on its
own physical volume in order to minimise disk head movement. If the
journal is on a volume which is shared with other processes which
might be writing other files (e.g. bindings journal, database, or
transaction coordinator) then the disk head may well be moving
rapidly between these files as it writes them, thus drastically
reducing performance.
When the message journal is stored on a SAN we recommend each
journal instance that is stored on the SAN is given its own LUN
(logical unit).
When the message journal is stored on a SAN we recommend each
journal instance that is stored on the SAN is given its own LUN
(logical unit).
- `create-journal-dir`
- `create-journal-dir`
If this is set to `true` then the journal directory will be
automatically created at the location specified in
`journal-directory` if it does not already exist. The default value
is `true`
If this is set to `true` then the journal directory will be
automatically created at the location specified in
`journal-directory` if it does not already exist. The default value
is `true`
- `journal-type`
- `journal-type`
Valid values are `NIO`, `ASYNCIO` or `MAPPED`.
Valid values are `NIO`, `ASYNCIO` or `MAPPED`.
Choosing `NIO` chooses the Java NIO journal. Choosing `ASYNCIO` chooses
the Linux asynchronous IO journal. If you choose `ASYNCIO` but are not
running Linux or you do not have libaio installed then Apache ActiveMQ Artemis will
detect this and automatically fall back to using `NIO`.
Choosing `MAPPED` chooses the Java Memory Mapped journal.
Choosing `NIO` chooses the Java NIO journal. Choosing `ASYNCIO` chooses
the Linux asynchronous IO journal. If you choose `ASYNCIO` but are not
running Linux or you do not have libaio installed then Apache ActiveMQ Artemis will
detect this and automatically fall back to using `NIO`.
Choosing `MAPPED` chooses the Java Memory Mapped journal.
- `journal-sync-transactional`
- `journal-sync-transactional`
If this is set to true then Apache ActiveMQ Artemis will make sure all transaction
data is flushed to disk on transaction boundaries (commit, prepare
and rollback). The default value is `true`.
If this is set to true then Apache ActiveMQ Artemis will make sure all transaction
data is flushed to disk on transaction boundaries (commit, prepare
and rollback). The default value is `true`.
- `journal-sync-non-transactional`
- `journal-sync-non-transactional`
If this is set to true then Apache ActiveMQ Artemis will make sure non
transactional message data (sends and acknowledgements) are flushed
to disk each time. The default value for this is `true`.
If this is set to true then Apache ActiveMQ Artemis will make sure non
transactional message data (sends and acknowledgements) are flushed
to disk each time. The default value for this is `true`.
- `journal-file-size`
- `journal-file-size`
The size of each journal file in bytes. The default value for this
is `10485760` bytes (10MiB).
The size of each journal file in bytes. The default value for this
is `10485760` bytes (10MiB).
- `journal-min-files`
- `journal-min-files`
The minimum number of files the journal will maintain. When Apache ActiveMQ Artemis
starts and there is no initial message data, Apache ActiveMQ Artemis will
pre-create `journal-min-files` number of files.
The minimum number of files the journal will maintain. When Apache ActiveMQ Artemis
starts and there is no initial message data, Apache ActiveMQ Artemis will
pre-create `journal-min-files` number of files.
Creating journal files and filling them with padding is a fairly
expensive operation and we want to minimise doing this at run-time
as files get filled. By pre-creating files, as one is filled the
journal can immediately resume with the next one without pausing to
create it.
Creating journal files and filling them with padding is a fairly
expensive operation and we want to minimise doing this at run-time
as files get filled. By pre-creating files, as one is filled the
journal can immediately resume with the next one without pausing to
create it.
Depending on how much data you expect your queues to contain at
steady state you should tune this number of files to match that
total amount of data.
Depending on how much data you expect your queues to contain at
steady state you should tune this number of files to match that
total amount of data.
- `journal-pool-files`
- `journal-pool-files`
The system will create as many files as needed however when reclaiming files
it will shrink back to the `journal-pool-files`.
The system will create as many files as needed however when reclaiming files
it will shrink back to the `journal-pool-files`.
The default for this parameter is -1, meaning it will never delete files on the journal once created.
The default for this parameter is -1, meaning it will never delete files on the journal once created.
Notice that the system can't grow infinitely as you are still required to use paging for destinations that can
grow indefinitely.
Notice that the system can't grow infinitely as you are still required to use paging for destinations that can
grow indefinitely.
Notice: in case you get too many files you can use [compacting](tools.md).
Notice: in case you get too many files you can use [compacting](data-tools.md).
- `journal-max-io`
- `journal-max-io`
Write requests are queued up before being submitted to the system
for execution. This parameter controls the maximum number of write
requests that can be in the IO queue at any one time. If the queue
becomes full then writes will block until space is freed up.
Write requests are queued up before being submitted to the system
for execution. This parameter controls the maximum number of write
requests that can be in the IO queue at any one time. If the queue
becomes full then writes will block until space is freed up.
When using NIO, this value should always be equal to `1`
When using NIO, this value should always be equal to `1`
When using AIO, the default should be `500`.
When using ASYNCIO, the default should be `500`.
The system maintains different defaults for this parameter depending
on whether it's NIO or AIO (default for NIO is 1, default for AIO is
500)
The system maintains different defaults for this parameter depending
on whether it's NIO or ASYNCIO (default for NIO is 1, default for ASYNCIO is
500)
There is a limit and the total max AIO can't be higher than what is
configured at the OS level (/proc/sys/fs/aio-max-nr) usually at
65536.
There is a limit and the total max ASYNCIO can't be higher than what is
configured at the OS level (/proc/sys/fs/aio-max-nr) usually at
65536.
- `journal-buffer-timeout`
- `journal-buffer-timeout`
Instead of flushing on every write that requires a flush, we
maintain an internal buffer, and flush the entire buffer either when
it is full, or when a timeout expires, whichever is sooner. This is
used for both NIO and AIO and allows the system to scale better with
many concurrent writes that require flushing.
Instead of flushing on every write that requires a flush, we
maintain an internal buffer, and flush the entire buffer either when
it is full, or when a timeout expires, whichever is sooner. This is
used for both NIO and ASYNCIO and allows the system to scale better with
many concurrent writes that require flushing.
This parameter controls the timeout at which the buffer will be
flushed if it hasn't filled already. AIO can typically cope with a
higher flush rate than NIO, so the system maintains different
defaults for both NIO and AIO (default for NIO is 3333333
nanoseconds - 300 times per second, default for AIO is 500000
nanoseconds - ie. 2000 times per second).
This parameter controls the timeout at which the buffer will be
flushed if it hasn't filled already. ASYNCIO can typically cope with a
higher flush rate than NIO, so the system maintains different
defaults for both NIO and ASYNCIO (default for NIO is 3333333
nanoseconds - 300 times per second, default for ASYNCIO is 500000
nanoseconds - ie. 2000 times per second).
> **Note**
>
> By increasing the timeout, you may be able to increase system
> throughput at the expense of latency, the default parameters are
> chosen to give a reasonable balance between throughput and
> latency.
> **Note:**
>
> By increasing the timeout, you may be able to increase system
> throughput at the expense of latency, the default parameters are
> chosen to give a reasonable balance between throughput and
> latency.
- `journal-buffer-size`
- `journal-buffer-size`
The size of the timed buffer on AIO. The default value is `490KiB`.
The size of the timed buffer on ASYNCIO. The default value is `490KiB`.
- `journal-compact-min-files`
- `journal-compact-min-files`
The minimal number of files before we can consider compacting the
journal. The compacting algorithm won't start until you have at
least `journal-compact-min-files`
The minimal number of files before we can consider compacting the
journal. The compacting algorithm won't start until you have at
least `journal-compact-min-files`
Setting this to 0 will disable the feature to compact completely.
This could be dangerous though as the journal could grow indefinitely.
Use it wisely!
Setting this to 0 will disable the feature to compact completely.
This could be dangerous though as the journal could grow indefinitely.
Use it wisely!
The default for this parameter is `10`
The default for this parameter is `10`
- `journal-compact-percentage`
- `journal-compact-percentage`
The threshold to start compacting. When less than this percentage is
considered live data, we start compacting. Note also that compacting
won't kick in until you have at least `journal-compact-min-files`
data files on the journal
The threshold to start compacting. When less than this percentage is
considered live data, we start compacting. Note also that compacting
won't kick in until you have at least `journal-compact-min-files`
data files on the journal
The default for this parameter is `30`
- `journal-datasync` (default: true)
This will disable the use of fdatasync on journal writes.
When enabled it ensures full power failure durability, otherwise
process failure durability on journal writes (OS guaranteed).
This is particularly effective for `NIO` and `MAPPED` journals, which rely on
*fsync*/*msync* to force write changes to disk.
The default for this parameter is `30`
- `journal-datasync` (default: true)
This will disable the use of fdatasync on journal writes.
When enabled it ensures full power failure durability, otherwise
process failure durability on journal writes (OS guaranteed).
This is particularly effective for `NIO` and `MAPPED` journals, which rely on
*fsync*/*msync* to force write changes to disk.
#### Note on disabling `journal-datasync`
@ -362,9 +361,9 @@ The message journal is configured using the following attributes in
The Java NIO journal gives great performance, but if you are running
Apache ActiveMQ Artemis using Linux Kernel 2.6 or later, we highly recommend you use
the `AIO` journal for the very best persistence performance.
the `ASYNCIO` journal for the very best persistence performance.
It's not possible to use the AIO journal under other operating systems
It's not possible to use the ASYNCIO journal under other operating systems
or earlier versions of the Linux kernel.
If you are running Linux kernel 2.6 or later and don't already have
@ -372,11 +371,15 @@ If you are running Linux kernel 2.6 or later and don't already have
Using yum, (e.g. on Fedora or Red Hat Enterprise Linux):
yum install libaio
```sh
yum install libaio
```
Using aptitude, (e.g. on Ubuntu or Debian system):
apt-get install libaio
```sh
apt-get install libaio
```
## JDBC Persistence
@ -423,50 +426,50 @@ To configure Apache ActiveMQ Artemis to use a database for persisting messages a
</store>
```
- `jdbc-connection-url`
- `jdbc-connection-url`
The full JDBC connection URL for your database server. The connection url should include all configuration parameters and database name. Note: When configuring the server using the XML configuration files please ensure to escape any illegal chars; "&" for example, is typical in JDBC connection url and should be escaped to "&amp;".
The full JDBC connection URL for your database server. The connection url should include all configuration parameters and database name. **Note:** When configuring the server using the XML configuration files please ensure to escape any illegal chars; "&" for example, is typical in JDBC connection url and should be escaped to "&amp;".
- `bindings-table-name`
- `bindings-table-name`
The name of the table in which bindings data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
The name of the table in which bindings data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
- `message-table-name`
- `message-table-name`
The name of the table in which bindings data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
The name of the table in which bindings data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
- `large-message-table-name`
- `large-message-table-name`
The name of the table in which messages and related data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
The name of the table in which messages and related data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
- `page-store-table-name`
The name of the table to house the page store directory information. Note that each address will have its own page table which will use this name appended with a unique id of up to 20 characters.
- `node-manager-store-table-name`
The name of the table in which the HA Shared Store locks (ie live and backup) and HA related data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
Each Shared Store live/backup pairs must use the same table name and isn't supported to share the same table between multiple (and unrelated) live/backup pairs.
- `jdbc-driver-class-name`
The fully qualified class name of the desired database Driver.
- `jdbc-network-timeout`
The JDBC network connection timeout in milliseconds. The default value
is 20000 milliseconds (ie 20 seconds).
When using a shared store it is recommended to set it less than or equal to `jdbc-lock-expiration`.
- `page-store-table-name`
- `jdbc-lock-renew-period`
The name of the table to house the page store directory information. Note that each address will have its own page table which will use this name appended with a unique id of up to 20 characters.
The period in milliseconds of the keep alive service of a JDBC lock. The default value
is 2000 milliseconds (ie 2 seconds).
- `jdbc-lock-expiration`
- `node-manager-store-table-name`
The name of the table in which the HA Shared Store locks (ie live and backup) and HA related data will be persisted for the ActiveMQ Artemis server. Specifying table names allows users to share single database amongst multiple servers, without interference.
Each Shared Store live/backup pairs must use the same table name and isn't supported to share the same table between multiple (and unrelated) live/backup pairs.
- `jdbc-driver-class-name`
The fully qualified class name of the desired database Driver.
- `jdbc-network-timeout`
The JDBC network connection timeout in milliseconds. The default value
is 20000 milliseconds (ie 20 seconds).
When using a shared store it is recommended to set it less than or equal to `jdbc-lock-expiration`.
- `jdbc-lock-renew-period`
The period in milliseconds of the keep alive service of a JDBC lock. The default value
is 2000 milliseconds (ie 2 seconds).
- `jdbc-lock-expiration`
The time in milliseconds a JDBC lock is considered valid without keeping it alive. The default value
is 20000 milliseconds (ie 20 seconds).
The time in milliseconds a JDBC lock is considered valid without keeping it alive. The default value
is 20000 milliseconds (ie 20 seconds).
- `jdbc-journal-sync-period`

View File

@ -2,11 +2,11 @@
JMS specifies 3 acknowledgement modes:
- `AUTO_ACKNOWLEDGE`
- `AUTO_ACKNOWLEDGE`
- `CLIENT_ACKNOWLEDGE`
- `CLIENT_ACKNOWLEDGE`
- `DUPS_OK_ACKNOWLEDGE`
- `DUPS_OK_ACKNOWLEDGE`
Apache ActiveMQ Artemis supports two additional modes: `PRE_ACKNOWLEDGE` and
`INDIVIDUAL_ACKNOWLEDGE`
@ -32,7 +32,7 @@ update messages. With these messages it might be reasonable to lose a
message in event of crash, since the next price update message will
arrive soon, overriding the previous price.
> **Note**
> **Note:**
>
> Please note, that if you use pre-acknowledge mode, then you will lose
> transactional semantics for messages being consumed, since clearly
@ -67,7 +67,7 @@ acknowledge mode with `ActiveMQJMSConstants.INDIVIDUAL_ACKNOWLEDGE`.
Individual ACK inherits all the semantics from Client Acknowledge, with
the exception the message is individually acked.
> **Note**
> **Note:**
>
> Please note, that to avoid confusion on MDB processing, Individual
> ACKNOWLEDGE is not supported through MDBs (or the inbound resource
@ -76,5 +76,5 @@ the exception the message is individually acked.
## Example
See the [examples](examples.md) chapter for an example which shows how to
use pre-acknowledgement mode with JMS.
See the [Pre-acknowledge Example](examples.md#pre-acknowledge) which shows how
to use pre-acknowledgement mode with JMS.

View File

@ -2,43 +2,43 @@
What is Apache ActiveMQ Artemis?
- Apache ActiveMQ Artemis is an open source project to build a multi-protocol,
embeddable, very high performance, clustered, asynchronous messaging
system.
- Apache ActiveMQ Artemis is an open source project to build a multi-protocol,
embeddable, very high performance, clustered, asynchronous messaging
system.
- Apache ActiveMQ Artemis is an example of Message Oriented Middleware (MoM). For a
description of MoMs and other messaging concepts please see the [Messaging Concepts](messaging-concepts.md).
- Apache ActiveMQ Artemis is an example of Message Oriented Middleware (MoM). For a
description of MoMs and other messaging concepts please see the [Messaging Concepts](messaging-concepts.md).
Why use Apache ActiveMQ Artemis? Here are just a few of the reasons:
- 100% open source software. Apache ActiveMQ Artemis is licensed using the Apache
Software License v 2.0 to minimise barriers to adoption.
- 100% open source software. Apache ActiveMQ Artemis is licensed using the Apache
Software License v 2.0 to minimise barriers to adoption.
- Apache ActiveMQ Artemis is designed with usability in mind.
- Apache ActiveMQ Artemis is designed with usability in mind.
- Written in Java. Runs on any platform with a Java 8+ runtime, that's
everything from Windows desktops to IBM mainframes.
- Written in Java. Runs on any platform with a Java 8+ runtime, that's
everything from Windows desktops to IBM mainframes.
- Amazing performance. Our ground-breaking high performance journal
provides persistent messaging performance at rates normally seen for
non-persistent messaging, our non-persistent messaging performance
rocks the boat too.
- Amazing performance. Our ground-breaking high performance journal
provides persistent messaging performance at rates normally seen for
non-persistent messaging, our non-persistent messaging performance
rocks the boat too.
- Full feature set. All the features you'd expect in any serious
messaging system, and others you won't find anywhere else.
- Full feature set. All the features you'd expect in any serious
messaging system, and others you won't find anywhere else.
- Elegant, clean-cut design with minimal third party dependencies. Run
ActiveMQ Artemis stand-alone, run it integrated in your favourite Java EE
application server, or run it embedded inside your own product. It's
up to you.
- Elegant, clean-cut design with minimal third party dependencies. Run
ActiveMQ Artemis stand-alone, run it integrated in your favourite Java EE
application server, or run it embedded inside your own product. It's
up to you.
- Seamless high availability. We provide a HA solution with automatic
client failover so you can guarantee zero message loss or
duplication in event of server failure.
- Seamless high availability. We provide a HA solution with automatic
client failover so you can guarantee zero message loss or
duplication in event of server failure.
- Hugely flexible clustering. Create clusters of servers that know how
to load balance messages. Link geographically distributed clusters
over unreliable connections to form a global network. Configure
routing of messages in a highly flexible way.
- Hugely flexible clustering. Create clusters of servers that know how
to load balance messages. Link geographically distributed clusters
over unreliable connections to form a global network. Configure
routing of messages in a highly flexible way.

View File

@ -9,20 +9,19 @@ page:<http://activemq.apache.org/artemis/download.html>
## Project Information
- If you have any user questions please use our [user
forum](http://activemq.2283324.n4.nabble.com/ActiveMQ-User-f2341805.html)
- If you have any user questions please use our [user
forum](http://activemq.2283324.n4.nabble.com/ActiveMQ-User-f2341805.html)
- If you have development related questions, please use our [developer
forum](http://activemq.2283324.n4.nabble.com/ActiveMQ-Dev-f2368404.html)
- If you have development related questions, please use our [developer
forum](http://activemq.2283324.n4.nabble.com/ActiveMQ-Dev-f2368404.html)
- Pop in and chat to us in our [IRC
channel](irc://irc.freenode.net:6667/apache-activemq)
- Pop in and chat to us in our [IRC
channel](irc://irc.freenode.net:6667/apache-activemq)
- Apache ActiveMQ Artemis Git repository is <https://github.com/apache/activemq-artemis>
- All release tags are available from
<https://github.com/apache/activemq-artemis/releases>
- Apache ActiveMQ Artemis Git repository is <https://github.com/apache/activemq-artemis>
- All release tags are available from
<https://github.com/apache/activemq-artemis/releases>
And many thanks to all our contributors, both old and new who helped
create Apache ActiveMQ Artemis.

View File

@ -1,646 +1,69 @@
# Protocols and Interoperability
## Protocols
Apache ActiveMQ Artemis has a powerful & flexible core which provides a foundation upon which other protocols can be
implemented. Each protocol implementation translates the ideas of its specific protocol onto this core.
ActiveMQ Artemis has a plugable protocol architecture. Protocol plugins come in the form of ActiveMQ Artemis protocol
modules. Each protocol module should be added to the brokers class path and are loaded by the broker at boot time.
ActiveMQ Artemis ships with 5 protocol modules out of the box. The 5 modules offer support for the following protocols:
The broker ships with a client implementation which interacts directly with this core. It uses what's called the ["core"
API](core.md), and it communicates over the network using the "core" protocol.
* AMQP
* OpenWire
* MQTT
* STOMP
* HornetQ
## Supported Protocols & APIs
In addition to the protocols above ActiveMQ Artemis also offers support for its own highly performant native protocol
"Core".
The broker has a pluggable protocol architecture. Protocol plugins come in the form of protocol modules. Each protocol
module is included on the broker's class path and loaded by the broker at boot time. The broker ships with 5 protocol
modules out of the box. The 5 modules offer support for the following protocols:
## Configuring protocols
- [AMQP](amqp.md)
- [OpenWire](openwire.md)
- [MQTT](mqtt.md)
- [STOMP](stomp.md)
- HornetQ
#### APIs and Other Interfaces
Although JMS is a standardized API, it does not define a network protocol. The [ActiveMQ Artemis JMS 2.0 client](using-jms.md)
is implemented on top of the core protocol. We also provide a [client-side JNDI implementation](using-jms.md#jndi).
The broker also ships with a [REST messaging interface](rest.md) (not to be confused with the REST management API
provided via our integration with Jolokia).
## Configuring Acceptors
In order to make use of a particular protocol, a transport must be configured with the desired protocol enabled. There
is a whole section on configuring transports that can be found [here](configuring-transports.md).
The default configuration shipped with the ActiveMQ Artemis distribution comes with a number of acceptors already
defined, one for each of the above protocols plus a generic acceptor that supports all protocols. To enable a
protocol on a particular acceptor simply add a url parameter "protocol=AMQP,STOMP" to the acceptor url. Where the value
of the parameter is a comma separated list of protocol names. If the protocol parameter is omitted from the url all
protocols are enabled.
defined, one for each of the above protocols plus a generic acceptor that supports all protocols. To enable
protocols on a particular acceptor simply add the `protocols` url parameter to the acceptor url where the value is one
or more protocols (separated by commas). If the `protocols` parameter is omitted from the url **all** protocols are
enabled.
- The following example enables only MQTT on port 1883
```xml
<!-- The following example enables only MQTT on port 1883 -->
<acceptors>
<acceptor>tcp://localhost:1883?protocols=MQTT</acceptor>
</acceptors>
```
<!-- The following example enables MQTT and AMQP on port 61617 -->
- The following example enables MQTT and AMQP on port 1883
```xml
<acceptors>
<acceptor>tcp://localhost:1883?protocols=MQTT,AMQP</acceptor>
<acceptor>tcp://localhost:5672?protocols=MQTT,AMQP</acceptor>
</acceptors>
```
<!-- The following example enables all protocols on 61616 -->
- The following example enables **all** protocols on `61616`:
```xml
<acceptors>
<acceptor>tcp://localhost:61616</acceptor>
</acceptors>
```
## AMQP
Apache ActiveMQ Artemis supports the [AMQP
1.0](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=amqp)
specification. To enable AMQP you must configure a Netty Acceptor to
receive AMQP clients, like so:
```xml
<acceptor name="amqp-acceptor">tcp://localhost:5672?protocols=AMQP</acceptor>
```
Apache ActiveMQ Artemis will then accept AMQP 1.0 clients on port 5672 which is the
default AMQP port.
There are 2 AMQP examples available see proton-j and proton-ruby which
use the qpid Java and Ruby clients respectively.
### AMQP and security
The Apache ActiveMQ Artemis Server accepts AMQP SASL Authentication and will use this
to map onto the underlying session created for the connection so you can
use the normal Apache ActiveMQ Artemis security configuration.
### AMQP Links
An AMQP Link is a unidirectional transport for messages between a
source and a target, i.e. a client and the Apache ActiveMQ Artemis Broker. A link will
have an endpoint of which there are 2 kinds, a Sender and a Receiver. At
the Broker a Sender will have its messages converted into an Apache ActiveMQ Artemis
Message and forwarded to its destination or target. A Receiver will map
onto an Apache ActiveMQ Artemis Server Consumer and convert Apache ActiveMQ Artemis messages back into
AMQP messages before being delivered.
### AMQP and destinations
If an AMQP Link is dynamic then a temporary queue will be created and
either the remote source or remote target address will be set to the
name of the temporary queue. If the Link is not dynamic then the
address of the remote target or source will be used for the queue. If this
does not exist then an exception will be sent
> **Note**
>
> For the next version we will add a flag to auto-create durable queues
> but for now you will have to add them via the configuration
### AMQP and Multicast Queues (Topics)
Although amqp has no notion of topics it is still possible to treat amqp consumers or receivers as subscriptions rather
than just consumers on a queue. By default any receiving link that attaches to an address that has only multicast enabled
will be treated as a subscription and a subscription queue will be created. If the Terminus Durability is either UNSETTLED_STATE
or CONFIGURATION then the queue will be made durable, similar to a JMS durable subscription and given a name made up from
the container id and the link name, something like `my-container-id:my-link-name`. if the Terminus Durability is configured
as NONE then a volatile multicast queue will be created.
Artemis also supports the qpid-jms client and will respect its use of topics regardless of the prefix used for the address.
### AMQP and Coordinations - Handling Transactions
An AMQP links target can also be a Coordinator, the Coordinator is used
to handle transactions. If a coordinator is used then the underlying
Apache ActiveMQ Artemis server session will be transacted and will be either rolled back
or committed via the coordinator.
> **Note**
>
> AMQP allows the use of multiple transactions per session,
> `amqp:multi-txns-per-ssn`, however in this version Apache ActiveMQ Artemis will only
> support single transactions per session
### AMQP scheduling message delivery
An AMQP message can provide scheduling information that controls the time in the future when the
message will be delivered at the earliest. This information is provided by adding a message annotation
to the sent message.
There are two different message annotations that can be used to schedule a message for later delivery:
* `x-opt-delivery-time`
The specified value must be a positive long corresponding to the time the message should be made available
for delivery (in milliseconds).
* `x-opt-delivery-delay`
The specified value must be a positive long corresponding to the amount of milliseconds after the broker
receives the given message before it should be made available for delivery.
if both annotations are present in the same message then the broker will prefer the more specific `x-opt-delivery-time` value.
## OpenWire
Apache ActiveMQ Artemis now supports the
[OpenWire](http://activemq.apache.org/openwire.html) protocol so that an
Apache ActiveMQ 5.x JMS client can talk directly to an Apache ActiveMQ Artemis server. To enable
OpenWire support you must configure a Netty Acceptor, like so:
```xml
<acceptor name="openwire-acceptor">tcp://localhost:61616?protocols=OPENWIRE</acceptor>
```
The Apache ActiveMQ Artemis server will then listen on port 61616 for incoming
openwire commands. Please note that the "protocols" parameter is not mandatory here.
The openwire configuration conforms to Apache ActiveMQ Artemis's "Single Port" feature.
Please refer to [Configuring Single Port](configuring-transports.md#single-port-support) for details.
Please refer to the openwire example for more coding details.
Currently we support Apache ActiveMQ Artemis clients that use standard JMS APIs. In
the future we will add support for more advanced, Apache ActiveMQ
Artemis-specific features.
### Connection Monitoring
OpenWire has a few parameters to control how each connection is monitored, they are:
* maxInactivityDuration:
It specifies the time (milliseconds) after which the connection is closed by the broker if no data was received.
Default value is 30000.
* maxInactivityDurationInitalDelay:
It specifies the maximum delay (milliseconds) before inactivity monitoring is started on the connection.
It can be useful if a broker is under load with many connections being created concurrently.
Default value is 10000.
* useInactivityMonitor:
A value of false disables the InactivityMonitor completely and connections will never time out.
By default it is enabled. On the broker side you don't need to set this. Instead you can set the
connection-ttl to -1.
* useKeepAlive:
Whether or not to send a KeepAliveInfo on an idle connection to prevent it from timing out.
Enabled by default. Disabling the keep alive will still make connections time out if no data
was received on the connection for the specified amount of time.
Note at the beginning the InactivityMonitor negotiates the appropriate maxInactivityDuration and
maxInactivityDurationInitalDelay. The shortest duration is taken for the connection.
For more details please see [ActiveMQ InactivityMonitor](http://activemq.apache.org/activemq-inactivitymonitor.html).
### Disable/Enable Advisories
By default, advisory topics ([ActiveMQ Advisory](http://activemq.apache.org/advisory-message.html))
are created in order to send certain type of advisory messages to listening clients. As a result,
advisory addresses and queues will be displayed on the management console, along with user deployed
addresses and queues. This sometimes causes confusion because the advisory objects are internally
managed without the user being aware of them. In addition, users may not want the advisory topics at all
(they consume extra resources and incur a performance penalty) and it is convenient to disable them entirely
from the broker side.
The protocol provides two parameters to control advisory behaviors on the broker side.
* supportAdvisory
Whether or not the broker supports advisory messages. If the value is true, advisory addresses/
queues will be created. If the value is false, no advisory addresses/queues are created. Default
value is true.
* suppressInternalManagementObjects
Whether or not the advisory addresses/queues, if any, will be registered to management service
(e.g. JMX registry). If set to true, no advisory addresses/queues will be registered. If set to
false, those are registered and will be displayed on the management console. Default value is
true.
The two parameters are configured on openwire acceptors, via URLs or API. For example:
<acceptor name="artemis">tcp://127.0.0.1:61616?protocols=CORE,AMQP,OPENWIRE;supportAdvisory=true;suppressInternalManagementObjects=false</acceptor>
### Virtual Topic Consumer Destination Translation
For existing OpenWire consumers of virtual topic destinations it is possible to configure a mapping function
that will translate the virtual topic consumer destination into a FQQN address. This address then represents
the consumer as a multicast binding to an address representing the virtual topic.
The configuration string property ```virtualTopicConsumerWildcards``` has two parts separated by a ```;```.
The first is the 5.x style destination filter that identifies the destination as belonging to a virtual topic.
The second identifies the number of ```paths``` that identify the consumer queue such that it can be parsed from the
destination.
For example, the default 5.x virtual topic with consumer prefix of ```Consumer.*.```, would require a
```virtualTopicConsumerWildcards``` filter of ```Consumer.*.>;2```. As a url parameter this transforms to ```Consumer.*.%3E%3B2``` when
the url significant characters ```>;``` are escaped with their hex code points.
In an acceptor url it would be:
```xml
<acceptor name="artemis">tcp://127.0.0.1:61616?protocols=OPENWIRE;virtualTopicConsumerWildcards=Consumer.*.%3E%3B2</acceptor>
```
This will translate ```Consumer.A.VirtualTopic.Orders``` into a FQQN of ```VirtualTopic.Orders::Consumer.A``` using the
int component ```2``` of the configuration to identify the consumer queue as the first two paths of the destination.
```virtualTopicConsumerWildcards``` is multi valued using a ```,``` separator.
Please see Virtual Topic Mapping example contained in the OpenWire [examples](examples.md).
## MQTT
MQTT is a light weight, client to server, publish / subscribe messaging protocol. MQTT has been specifically
designed to reduce transport overhead (and thus network traffic) and code footprint on client devices.
For this reason MQTT is ideally suited to constrained devices such as sensors and actuators and is quickly
becoming the de facto standard communication protocol for IoT.
Apache ActiveMQ Artemis supports MQTT v3.1.1 (and also the older v3.1 code message format). To enable MQTT,
simply add an appropriate acceptor with the MQTT protocol enabled. For example:
<acceptor name="mqtt">tcp://localhost:1883?protocols=MQTT</acceptor>
By default the configuration shipped with Apache ActiveMQ Artemis has the above acceptor already defined, MQTT is
also active by default on the generic acceptor defined on port 61616 (where all protocols are enabled), in the out
of the box configuration.
The best source of information on the MQTT protocol is in the specification. The MQTT v3.1.1 specification can
be downloaded from the OASIS website here: https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html
Some noteworthy features of MQTT are explained below:
### MQTT Quality of Service
MQTT offers 3 quality of service levels.
Each message (or topic subscription) can define a quality of service that is associated with it. The quality of service
level defined on a topic is the maximum level a client is willing to accept. The quality of service level on a
message is the desired quality of service level for this message. The broker will attempt to deliver messages to
subscribers at the highest quality of service level based on what is defined on the message and topic subscription.
Each quality of service level offers a level of guarantee by which a message is sent or received:
* QoS 0: AT MOST ONCE: Guarantees that a particular message is only ever received by the subscriber a maximum of one time.
This does mean that the message may never arrive. The sender and the receiver will attempt to deliver the message,
but if something fails and the message does not reach its destination (say due to a network connection) the message
may be lost. This QoS has the least network traffic overhead and the least burden on the client and the broker and is often
useful for telemetry data where it doesn't matter if some of the data is lost.
* QoS 1: AT LEAST ONCE: Guarantees that a message will reach its intended recipient one or more times. The sender will
continue to send the message until it receives an acknowledgment from the recipient, confirming it has received the message.
The result of this QoS is that the recipient may receive the message multiple times, and it also increases the network
overhead compared to QoS 0 (due to acks). In addition more burden is placed on the sender as it needs to store the message
and retry should it fail to receive an ack in a reasonable time.
* QoS 2: EXACTLY ONCE: The most costly of the QoS (in terms of network traffic and burden on sender and receiver) this
QoS will ensure that the message is received by a recipient exactly one time. This ensures that the receiver never gets
any duplicate copies of the message and will eventually get it, but at the extra cost of network overhead and complexity
required on the sender and receiver.
### MQTT Retain Messages
MQTT has an interesting feature in which messages can be "retained" for a particular address. This means that once a
retain message has been sent to an address, any new subscribers to that address will receive the last sent retain
message before any other messages, this happens even if the retained message was sent before a client has connected
or subscribed. An example of where this feature might be useful is in environments such as IoT where devices need to
quickly get the current state of a system when they are on boarded into a system.
### Will Messages
A will message can be sent when a client initially connects to a broker. Clients are able to set a "will
message" as part of the connect packet. If the client abnormally disconnects, say due to a device or network failure
the broker will proceed to publish the will message to the specified address (as defined also in the connect packet).
Other subscribers to the will topic will receive the will message and can react accordingly. This feature can be useful
in an IoT style scenario to detect errors across a potentially large scale deployment of devices.
### Debug Logging
Detailed protocol logging (e.g. packets in/out) can be activated via the following steps:
1) Open `<ARTEMIS_INSTANCE>/etc/logging.properties`
2) Add `org.apache.activemq.artemis.core.protocol.mqtt` to the `loggers` list.
3) Add this line to enable `TRACE` logging for this new logger: `logger.org.apache.activemq.artemis.core.protocol.mqtt.level=TRACE`
4) Ensure the `level` for the `handler` you want to log the message doesn't block the `TRACE` logging. For example,
modify the `level` of the `CONSOLE` `handler` like so: `handler.CONSOLE.level=TRACE`
The MQTT specification doesn't dictate the format of the payloads which clients publish. As far as the broker is
concerned a payload is just an array of bytes. However, to facilitate logging the broker will encode the payloads
as UTF-8 strings and print them up to 256 characters. Payload logging is limited to avoid filling the logs with potentially
hundreds of megabytes of unhelpful information.
### Wild card subscriptions
MQTT addresses are hierarchical much like a file system, and use "/" character to separate hierarchical levels.
Subscribers are able to subscribe to specific topics or to whole branches of a hierarchy.
To subscribe to branches of an address hierarchy a subscriber can use wild cards.
There are 2 types of wild card in MQTT:
* "#" Multi level wild card. Adding this wild card to an address would match all branches of the address hierarchy
under a specified node. For example: /uk/# Would match /uk/cities, /uk/cities/newcastle and also /uk/rivers/tyne.
Subscribing to an address "#" would result in subscribing to all topics in the broker. This can be useful, but should
be done so with care since it has significant performance implications.
* "+" Single level wild card. Matches a single level in the address hierarchy. For example /uk/+/stores would
match /uk/newcastle/stores but not /uk/cities/newcastle/stores.
## Stomp
[Stomp](https://stomp.github.io/) is a text-orientated wire protocol
that allows Stomp clients to communicate with Stomp Brokers. Apache ActiveMQ Artemis
now supports Stomp 1.0, 1.1 and 1.2.
Stomp clients are available for several languages and platforms making
it a good choice for interoperability.
## Native Stomp support
Apache ActiveMQ Artemis provides native support for Stomp. To be able to send and
receive Stomp messages, you must configure a `NettyAcceptor` with a
`protocols` parameter set to have `stomp`:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP</acceptor>
```
With this configuration, Apache ActiveMQ Artemis will accept Stomp connections on the
port `61613` (which is the default port of the Stomp brokers).
See the `stomp` example which shows how to configure an Apache ActiveMQ Artemis server
with Stomp.
### Limitations
Message acknowledgements are not transactional. The ACK frame can not be
part of a transaction (it will be ignored if its `transaction` header is
set).
### Stomp 1.1/1.2 Notes
#### Virtual Hosting
Apache ActiveMQ Artemis currently doesn't support virtual hosting, which means the
'host' header in CONNECT frame will be ignored.
### Mapping Stomp destinations to addresses and queues
Stomp clients deal with *destinations* when sending messages and
subscribing. Destination names are simply strings which are mapped to
some form of destination on the server - how the server translates these
is left to the server implementation.
In Apache ActiveMQ Artemis, these destinations are mapped to *addresses* and *queues*
depending on the operation being done and the desired semantics (e.g. anycast or
multicast).
#### Sending
When a Stomp client sends a message (using a `SEND` frame), the protocol manager looks
at the message to determine where to route it and potentially how to create the address
and/or queue to which it is being sent. The protocol manager uses either of the following
bits of information from the frame to determine the routing type:
1. The value of the `destination-type` header. Valid values are `ANYCAST` and
`MULTICAST` (case sensitive).
2. The "prefix" on the `destination` header. See [additional info](address-model.md) on
prefixes.
If no indication of routing type is supplied then anycast semantics are used.
The `destination` header maps to an address of the same name. If the `destination` header
used a prefix then the prefix is stripped.
#### Receiving
When a client receives a message from the broker the message will have the `destination-type`
header set to either `MULTICAST` or `ANYCAST` as determined when the message was originally
sent/routed.
#### Subscribing
When a Stomp client subscribes to a destination (using a `SUBSCRIBE` frame), the protocol
manager looks at the frame to determine what subscription semantics to use and potentially how
to create the address and/or queue for the subscription. The protocol manager uses either of
the following bits of information from the frame to determine the routing type:
1. The value of the `subscription-type` header. Valid values are `ANYCAST` and
`MULTICAST` (case sensitive).
2. The "prefix" on the `destination` header. See [additional info](address-model.md) on
prefixes.
If no indication of routing type is supplied then anycast semantics are used.
The `destination` header maps to an address of the same name if multicast is used or to a queue
of the same name if anycast is used. If the `destination` header used a prefix then the prefix
is stripped.
### STOMP heart-beating and connection-ttl
Well behaved STOMP clients will always send a DISCONNECT frame before
closing their connections. In this case the server will clear up any
server side resources such as sessions and consumers synchronously.
However if STOMP clients exit without sending a DISCONNECT frame or if
they crash the server will have no way of knowing immediately whether
the client is still alive or not. STOMP connections therefore default to
a connection-ttl value of 1 minute (see chapter on
[connection-ttl](connection-ttl.md) for more information). This value can
be overridden using the `connection-ttl-override` property or if you
need a specific connectionTtl for your stomp connections without
affecting the broker-wide `connection-ttl-override` setting, you can
configure your stomp acceptor with the "connectionTtl" property, which
is used to set the ttl for connections that are created from that acceptor.
For example:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;connectionTtl=20000</acceptor>
```
The above configuration will make sure that any Stomp connection that is
created from that acceptor and does not include a `heart-beat` header
or disables client-to-server heart-beats by specifying a `0` value will
have its connection-ttl set to 20 seconds. The `connectionTtl` set on an
acceptor will take precedence over `connection-ttl-override`. The default
`connectionTtl` is 60,000 milliseconds.
Since Stomp 1.0 does not support heart-beating then all connections from
Stomp 1.0 clients will have a connection TTL imposed upon them by the broker
based on the aforementioned configuration options. Likewise, any Stomp 1.1
or 1.2 clients that don't specify a `heart-beat` header or disable client-to-server
heart-beating (e.g. by sending `0,X` in the `heart-beat` header) will have
a connection TTL imposed upon them by the broker.
For Stomp 1.1 and 1.2 clients which send a non-zero client-to-server `heart-beat`
header value then their connection TTL will be set accordingly. However, the broker
will not strictly set the connection TTL to the same value as that specified in the
`heart-beat` since even small network delays could then cause spurious disconnects.
Instead, the client-to-server value in the `heart-beat` will be multiplied by the
`heartBeatConnectionTtlModifer` specified on the acceptor. The
`heartBeatConnectionTtlModifer` is a decimal value that defaults to `2.0` so for
example, if a client sends a `heart-beat` header of `1000,0` the connection TTL
will be set to `2000` so that the data or ping frames sent every 1000 milliseconds will
have a sufficient cushion so as not to be considered late and trigger a disconnect.
This is also in accordance with the Stomp 1.1 and 1.2 specifications which both state,
"because of timing inaccuracies, the receiver SHOULD be tolerant and take into account
an error margin."
The minimum and maximum connection TTL allowed can also be specified on the
acceptor via the `connectionTtlMin` and `connectionTtlMax` properties respectively.
The default `connectionTtlMin` is 1000 and the default `connectionTtlMax` is Java's
`Long.MAX_VALUE` meaning there essentially is no max connection TTL by default.
Keep in mind that the `heartBeatConnectionTtlModifer` is relevant here. For
example, if a client sends a `heart-beat` header of `20000,0` and the acceptor
is using a `connectionTtlMax` of `30000` and a default `heartBeatConnectionTtlModifer`
of `2.0` then the connection TTL would be `40000` (i.e. `20000` * `2.0`) which would
exceed the `connectionTtlMax`. In this case the server would respond to the client
with a `heart-beat` header of `0,15000` (i.e. `30000` / `2.0`). As described
previously, this is to make sure there is a sufficient cushion for the client
heart-beats in accordance with the Stomp 1.1 and 1.2 specifications. The same kind
of calculation is done for `connectionTtlMin`.
The minimum server-to-client heart-beat value is 500ms.
> **Note**
>
> Please note that the STOMP protocol version 1.0 does not contain any
> heart-beat frame. It is therefore the user's responsibility to make
> sure data is sent within connection-ttl or the server will assume the
> client is dead and clean up server side resources. With `Stomp 1.1`
> users can use heart-beats to maintain the life cycle of stomp
> connections.
### Selector/Filter expressions
Stomp subscribers can specify an expression used to select or filter
what the subscriber receives using the `selector` header. The filter
expression syntax follows the *core filter syntax* described in the
[Filter Expressions](filter-expressions.md) documentation.
### Stomp and JMS interoperability
#### Sending and consuming Stomp message from JMS or Apache ActiveMQ Artemis Core API
Stomp is mainly a text-orientated protocol. To make it simpler to
interoperate with JMS and Apache ActiveMQ Artemis Core API, our Stomp implementation
checks for presence of the `content-length` header to decide how to map
a Stomp 1.0 message to a JMS Message or a Core message.
If the Stomp 1.0 message does *not* have a `content-length` header, it will
be mapped to a JMS *TextMessage* or a Core message with a *single
nullable SimpleString in the body buffer*.
Alternatively, if the Stomp 1.0 message *has* a `content-length` header, it
will be mapped to a JMS *BytesMessage* or a Core message with a *byte[]
in the body buffer*.
The same logic applies when mapping a JMS message or a Core message to
Stomp. A Stomp 1.0 client can check the presence of the `content-length`
header to determine the type of the message body (String or bytes).
#### Message IDs for Stomp messages
When receiving Stomp messages via a JMS consumer or a QueueBrowser, the
messages have no properties like JMSMessageID by default. However this
may bring some inconvenience to clients who wants an ID for their
purpose. Apache ActiveMQ Artemis Stomp provides a parameter to enable message ID on
each incoming Stomp message. If you want each Stomp message to have a
unique ID, just set the `stompEnableMessageId` to true. For example:
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;stompEnableMessageId=true</acceptor>
When the server starts with the above setting, each stomp message sent
through this acceptor will have an extra property added. The property
key is `amq-message-id` and the value is a String representation of a
long type internal message id prefixed with "`STOMP`", like:
amq-message-id : STOMP12345
If `stomp-enable-message-id` is not specified in the configuration,
default is `false`.
### Durable Subscriptions
The `SUBSCRIBE` and `UNSUBSCRIBE` frames can be augmented with special headers to create
and destroy durable subscriptions respectively.
To create a durable subscription the `client-id` header must be set on the `CONNECT` frame
and the `durable-subscription-name` must be set on the `SUBSCRIBE` frame. The combination
of these two headers will form the identity of the durable subscription.
To delete a durable subscription the `client-id` header must be set on the `CONNECT` frame
and the `durable-subscription-name` must be set on the `UNSUBSCRIBE` frame. The values for
these headers should match what was set on the `SUBSCRIBE` frame to delete the corresponding
durable subscription.
It is possible to pre-configure durable subscriptions since the Stomp implementation creates
the queue used for the durable subscription in a deterministic way (i.e. using the format of
`client-id`.`subscription-name`). For example, if you wanted to configure a durable
subscription on the address `myAddress` with a client-id of `myclientid` and a subscription
name of `mysubscription` then configure the durable subscription:
```xml
<core xmlns="urn:activemq:core">
...
<addresses>
<address name="myAddress">
<multicast>
<queue name="myclientid.mysubscription"/>
</multicast>
</address>
</addresses>
...
</core>
```
### Handling of Large Messages with Stomp
Stomp clients may send very large frame bodies which can exceed the
size of Apache ActiveMQ Artemis server's internal buffer, causing unexpected errors. To
prevent this situation from happening, Apache ActiveMQ Artemis provides a stomp
configuration attribute `stompMinLargeMessageSize`. This attribute
can be configured inside a stomp acceptor, as a parameter. For example:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;stompMinLargeMessageSize=10240</acceptor>
```
The type of this attribute is integer. When this attribute is
configured, Apache ActiveMQ Artemis server will check the size of the body of each
Stomp frame arrived from connections established with this acceptor. If
the size of the body is equal or greater than the value of
`stompMinLargeMessageSize`, the message will be persisted as a large
message. When a large message is delivered to a stomp consumer, the
broker will automatically handle the conversion from a large
message to a normal message, before sending it to the client.
If a large message is compressed, the server will uncompress it before
sending it to stomp clients. The default value of
`stompMinLargeMessageSize` is the same as the default value of
[min-large-message-size](large-messages.md#configuring-parameters).
### Stomp Over Web Sockets
Apache ActiveMQ Artemis also support Stomp over [Web
Sockets](https://html.spec.whatwg.org/multipage/web-sockets.html). Modern web browser which
support Web Sockets can send and receive Stomp messages from Apache ActiveMQ Artemis.
Stomp over Web Sockets is supported via the normal Stomp acceptor:
```xml
<acceptor name="stomp-ws-acceptor">tcp://localhost:61614?protocols=STOMP</acceptor>
```
With this configuration, Apache ActiveMQ Artemis will accept Stomp connections over Web
Sockets on the port `61614`. Web browser can
then connect to `ws://<server>:61614` using a Web Socket to send
and receive Stomp messages.
A companion JavaScript library to ease client-side development is
available from [GitHub](https://github.com/jmesnil/stomp-websocket)
(please see its [documentation](http://jmesnil.net/stomp-websocket/doc/)
for a complete description).
The payload length of websocket frames can vary between client implementations. By default
Apache ActiveMQ Artemis will accept frames with a payload length of 65,536. If the client
needs to send payloads longer than this in a single frame this length can be adjusted by
using the `stompMaxFramePayloadLength` URL parameter on the acceptor.
The `stomp-websockets` example shows how to configure Apache ActiveMQ Artemis server to
have web browsers and Java applications exchanges messages on a JMS
topic.
## REST
Please see [Rest Interface](rest.md)
Here are the supported protocols and their corresponding value used in the `protocols` url parameter.
Protocol|`protocols` value
---|---
Core (Artemis & HornetQ native)|`CORE`
OpenWire (5.x native)|`OPENWIRE`
AMQP|`AMQP`
MQTT|`MQTT`
STOMP|`STOMP`

View File

@ -20,10 +20,10 @@ Here is an example of the XML used to set resource limits:
```
Unlike the `match` from `address-setting`, this `match` does not use
any wild-card syntax. It's a simple 1:1 mapping of the limits to a user.
any wild-card syntax. It's a simple 1:1 mapping of the limits to a **user**.
`max-connections` defines how many connections the matched user can make
- `max-connections` defines how many connections the matched user can make
to the broker. The default is -1 which means there is no limit.
`max-queues` defines how many queues the matched user can create. The default
- `max-queues` defines how many queues the matched user can create. The default
is -1 which means there is no limit.

File diff suppressed because it is too large Load Diff

View File

@ -30,5 +30,5 @@ same property on the core message before sending.
## Example
See the [examples](examples.md) chapter for an example which shows how scheduled messages can be used with
See the [Scheduled Message Example](examples.md#scheduled-message) which shows how scheduled messages can be used with
JMS.

File diff suppressed because it is too large Load Diff

View File

@ -32,15 +32,15 @@ has definitely reached the server, and a response has been sent back to
the client. This can be configured individually for durable and
non-durable messages, and is determined by the following two URL parameters:
- `blockOnDurableSend`. If this is set to `true` then all calls to
send for durable messages on non transacted sessions will block
until the message has reached the server, and a response has been
sent back. The default value is `true`.
- `blockOnDurableSend`. If this is set to `true` then all calls to
send for durable messages on non transacted sessions will block
until the message has reached the server, and a response has been
sent back. The default value is `true`.
- `blockOnNonDurableSend`. If this is set to `true` then all calls to
send for non-durable messages on non transacted sessions will block
until the message has reached the server, and a response has been
sent back. The default value is `false`.
- `blockOnNonDurableSend`. If this is set to `true` then all calls to
send for non-durable messages on non transacted sessions will block
until the message has reached the server, and a response has been
sent back. The default value is `false`.
Setting block on sends to `true` can reduce performance since each send
requires a network round trip before the next send can be performed.

View File

@ -36,5 +36,5 @@ the detection algorithm. See [thread pooling](thread-pooling.md) for more detail
## Example
See the [examples](examples.md) chapter for an example which shows how to detect a slow consumer
See the [slow consumer example](examples.md#slow-consumer) which shows how to detect a slow consumer
with Apache ActiveMQ Artemis.

View File

@ -1,39 +1,16 @@
# Spring Integration
Apache ActiveMQ Artemis provides a simple bootstrap class,
`org.apache.activemq.integration.spring.SpringJmsBootstrap`, for
integration with Spring. To use it, you configure Apache ActiveMQ Artemis as you always
would, through its various configuration files like
`broker.xml`.
`org.apache.activemq.artemis.integration.spring.SpringJmsBootstrap`, for
integration with Spring. To use it, you configure Apache ActiveMQ Artemis as
you always would, through its various configuration files like `broker.xml`.
Here we've specified a `javax.jms.ConnectionFactory` we want bound to a
`ConnectionFactory` entry as well as a queue destination bound to a
`/queue/exampleQueue` entry. Using the `SpringJmsBootStrap` bean will
automatically populate the Spring context with references to those beans
so that you can use them. Below is an example Spring JMS bean file
taking advantage of this feature:
The `SpringJmsBootstrap` class extends the EmbeddedJMS class talked about in
[embedding ActiveMQ](embedding-activemq.md) and the same defaults and
configuration options apply. See the javadocs for more details on other
properties of the bean class.
```xml
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd">
## Example
<bean id="EmbeddedJms" class="org.apache.activemq.integration.spring.SpringJmsBootstrap" init-method="start"/>
<bean id="listener" class="org.apache.activemq.tests.integration.spring.ExampleListener"/>
<bean id="listenerContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer">
<property name="connectionFactory" ref="ConnectionFactory"/>
<property name="destination" ref="/queue/exampleQueue"/>
<property name="messageListener" ref="listener"/>
</bean>
</beans>
```
As you can see, the `listenerContainer` bean references the components
defined in the `activemq-jms.xml` file. The `SpringJmsBootstrap` class
extends the EmbeddedJMS class talked about in [JMS API](embedding-activemq.md) and the same defaults
and configuration options apply. Also notice that an `init-method` must
be declared with a start value so that the bean's lifecycle is executed.
See the javadocs for more details on other properties of the bean class.
See the [Spring Integration Example](examples.md#spring-integration) for a
demonstration of how this can work.

View File

@ -0,0 +1,293 @@
# STOMP
[STOMP](https://stomp.github.io/) is a text-orientated wire protocol that
allows STOMP clients to communicate with STOMP Brokers. Apache ActiveMQ Artemis
supports STOMP 1.0, 1.1 and 1.2.
STOMP clients are available for several languages and platforms making it a
good choice for interoperability.
By default there are `acceptor` elements configured to accept STOMP connections
on ports `61616` and `61613`.
See the general [Protocols and Interoperability](protocols-interoperability.md)
chapter for details on configuring an `acceptor` for STOMP.
Refer to the STOMP examples for a look at some of this functionality in action.
## Limitations
The STOMP specification identifies **transactional acknowledgements** as an
optional feature. Support for transactional acknowledgements is not implemented
in Apache ActiveMQ Artemis. The `ACK` frame can not be part of a transaction.
It will be ignored if its `transaction` header is set.
## Virtual Hosting
Apache ActiveMQ Artemis currently doesn't support virtual hosting, which means
the `host` header in `CONNECT` frame will be ignored.
## Mapping STOMP destinations to addresses and queues
STOMP clients deal with *destinations* when sending messages and subscribing.
Destination names are simply strings which are mapped to some form of
destination on the server - how the server translates these is left to the
server implementation.
In Apache ActiveMQ Artemis, these destinations are mapped to *addresses* and
*queues* depending on the operation being done and the desired semantics (e.g.
anycast or multicast).
## Sending
When a STOMP client sends a message (using a `SEND` frame), the protocol
manager looks at the message to determine where to route it and potentially how
to create the address and/or queue to which it is being sent. The protocol
manager uses either of the following bits of information from the frame to
determine the routing type:
1. The value of the `destination-type` header. Valid values are `ANYCAST` and
`MULTICAST` (case sensitive).
2. The "prefix" on the `destination` header. See [additional
info](address-model.md#using-prefixes-to-determine-routing-type) on
prefixes.
If no indication of routing type is supplied then anycast semantics are used.
The `destination` header maps to an address of the same name. If the
`destination` header used a prefix then the prefix is stripped.
## Subscribing
When a STOMP client subscribes to a destination (using a `SUBSCRIBE` frame),
the protocol manager looks at the frame to determine what subscription
semantics to use and potentially how to create the address and/or queue for the
subscription. The protocol manager uses either of the following bits of
information from the frame to determine the routing type:
1. The value of the `subscription-type` header. Valid values are `ANYCAST` and
`MULTICAST` (case sensitive).
2. The "prefix" on the `destination` header. See [additional
info](address-model.md#using-prefixes-to-determine-routing-type) on
prefixes.
If no indication of routing type is supplied then anycast semantics are used.
The `destination` header maps to an address of the same name if multicast is
used or to a queue of the same name if anycast is used. If the `destination`
header used a prefix then the prefix is stripped.
## STOMP heart-beating and connection-ttl
Well behaved STOMP clients will always send a `DISCONNECT` frame before closing
their connections. In this case the server will clear up any server side
resources such as sessions and consumers synchronously. However if STOMP
clients exit without sending a `DISCONNECT` frame or if they crash the server
will have no way of knowing immediately whether the client is still alive or
not. STOMP connections therefore default to a `connection-ttl` value of 1
minute (see chapter on [connection-ttl](connection-ttl.md) for more
information). This value can be overridden using the `connection-ttl-override`
property or if you need a specific connectionTtl for your stomp connections
without affecting the broker-wide `connection-ttl-override` setting, you can
configure your stomp acceptor with the `connectionTtl` property, which is used
to set the ttl for connections that are created from that acceptor. For
example:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;connectionTtl=20000</acceptor>
```
The above configuration will make sure that any STOMP connection that is
created from that acceptor and does not include a `heart-beat` header or
disables client-to-server heart-beats by specifying a `0` value will have its
`connection-ttl` set to 20 seconds. The `connectionTtl` set on an acceptor will
take precedence over `connection-ttl-override`. The default `connectionTtl` is
60,000 milliseconds.
Since STOMP 1.0 does not support heart-beating then all connections from STOMP
1.0 clients will have a connection TTL imposed upon them by the broker based on
the aforementioned configuration options. Likewise, any STOMP 1.1 or 1.2
clients that don't specify a `heart-beat` header or disable client-to-server
heart-beating (e.g. by sending `0,X` in the `heart-beat` header) will have a
connection TTL imposed upon them by the broker.
For STOMP 1.1 and 1.2 clients which send a non-zero client-to-server
`heart-beat` header value then their connection TTL will be set accordingly.
However, the broker will not strictly set the connection TTL to the same value
as the specified in the `heart-beat` since even small network delays could then
cause spurious disconnects. Instead, the client-to-server value in the
`heart-beat` will be multiplied by the `heartBeatConnectionTtlModifer`
specified on the acceptor. The `heartBeatConnectionTtlModifer` is a decimal
value that defaults to `2.0` so for example, if a client sends a `heart-beat`
header of `1000,0` then the connection TTL will be set to `2000` so that the
data or ping frames sent every 1000 milliseconds will have a sufficient cushion
so as not to be considered late and trigger a disconnect. This is also in
accordance with the STOMP 1.1 and 1.2 specifications which both state, "because
of timing inaccuracies, the receiver SHOULD be tolerant and take into account
an error margin."
The minimum and maximum connection TTL allowed can also be specified on the
acceptor via the `connectionTtlMin` and `connectionTtlMax` properties
respectively. The default `connectionTtlMin` is 1000 and the default
`connectionTtlMax` is Java's `Long.MAX_VALUE` meaning there essentially is no
max connection TTL by default. Keep in mind that the
`heartBeatConnectionTtlModifer` is relevant here. For example, if a client
sends a `heart-beat` header of `20000,0` and the acceptor is using a
`connectionTtlMax` of `30000` and a default `heartBeatConnectionTtlModifer` of
`2.0` then the connection TTL would be `40000` (i.e. `20000` * `2.0`) which
would exceed the `connectionTtlMax`. In this case the server would respond to
the client with a `heart-beat` header of `0,15000` (i.e. `30000` / `2.0`). As
described previously, this is to make sure there is a sufficient cushion for
the client heart-beats in accordance with the STOMP 1.1 and 1.2 specifications.
The same kind of calculation is done for `connectionTtlMin`.
The minimum server-to-client heart-beat value is 500ms.
> **Note:**
>
> Please note that the STOMP protocol version 1.0 does not contain any
> heart-beat frame. It is therefore the user's responsibility to make sure data
> is sent within connection-ttl or the server will assume the client is dead
> and clean up server side resources. With STOMP 1.1 users can use heart-beats
> to maintain the life cycle of stomp connections.
## Selector/Filter expressions
STOMP subscribers can specify an expression used to select or filter what the
subscriber receives using the `selector` header. The filter expression syntax
follows the *core filter syntax* described in the [Filter
Expressions](filter-expressions.md) documentation.
## STOMP and JMS interoperability
### Sending and consuming STOMP message from JMS or Core API
STOMP is mainly a text-orientated protocol. To make it simpler to interoperate
with JMS and Core API, our STOMP implementation checks for presence of the
`content-length` header to decide how to map a STOMP 1.0 message to a JMS
Message or a Core message.
If the STOMP 1.0 message does *not* have a `content-length` header, it will be
mapped to a JMS *TextMessage* or a Core message with a *single nullable
SimpleString in the body buffer*.
Alternatively, if the STOMP 1.0 message *has* a `content-length` header, it
will be mapped to a JMS *BytesMessage* or a Core message with a *byte[] in the
body buffer*.
The same logic applies when mapping a JMS message or a Core message to STOMP. A
STOMP 1.0 client can check the presence of the `content-length` header to
determine the type of the message body (String or bytes).
### Message IDs for STOMP messages
When receiving STOMP messages via a JMS consumer or a QueueBrowser, the
messages have no properties like JMSMessageID by default. However this may
be inconvenient for clients that want an ID for their own purposes. The
broker's STOMP implementation provides a parameter to enable a message ID on each incoming STOMP
message. If you want each STOMP message to have a unique ID, just set the
`stompEnableMessageId` to true. For example:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;stompEnableMessageId=true</acceptor>
```
When the server starts with the above setting, each stomp message sent through
this acceptor will have an extra property added. The property key is
`amq-message-id` and the value is a String representation of a long type
internal message id prefixed with `STOMP`, like:
```
amq-message-id : STOMP12345
```
The default `stomp-enable-message-id` value is `false`.
## Durable Subscriptions
The `SUBSCRIBE` and `UNSUBSCRIBE` frames can be augmented with special headers
to create and destroy durable subscriptions respectively.
To create a durable subscription the `client-id` header must be set on the
`CONNECT` frame and the `durable-subscription-name` must be set on the
`SUBSCRIBE` frame. The combination of these two headers will form the identity
of the durable subscription.
To delete a durable subscription the `client-id` header must be set on the
`CONNECT` frame and the `durable-subscription-name` must be set on the
`UNSUBSCRIBE` frame. The values for these headers should match what was set on
the `SUBSCRIBE` frame to delete the corresponding durable subscription.
It is possible to pre-configure durable subscriptions since the STOMP
implementation creates the queue used for the durable subscription in a
deterministic way (i.e. using the format of `client-id`.`subscription-name`).
For example, if you wanted to configure a durable subscription on the address
`myAddress` with a client-id of `myclientid` and a subscription name of
`mysubscription` then configure the durable subscription:
```xml
<addresses>
<address name="myAddress">
<multicast>
<queue name="myclientid.mysubscription"/>
</multicast>
</address>
</addresses>
```
## Handling of Large Messages with STOMP
STOMP clients may send very large frame bodies which can exceed the size of the
broker's internal buffer, causing unexpected errors. To prevent this situation
from happening, the broker provides a STOMP configuration attribute
`stompMinLargeMessageSize`. This attribute can be configured inside a stomp
acceptor, as a parameter. For example:
```xml
<acceptor name="stomp-acceptor">tcp://localhost:61613?protocols=STOMP;stompMinLargeMessageSize=10240</acceptor>
```
The type of this attribute is integer. When this attribute is configured, the
broker will check the size of the body of each STOMP frame arrived from
connections established with this acceptor. If the size of the body is equal or
greater than the value of `stompMinLargeMessageSize`, the message will be
persisted as a large message. When a large message is delivered to a STOMP
consumer, the broker will automatically handle the conversion from a large
message to a normal message, before sending it to the client.
If a large message is compressed, the server will uncompress it before
sending it to stomp clients. The default value of `stompMinLargeMessageSize` is
the same as the default value of
[min-large-message-size](large-messages.md#configuring-parameters).
## Web Sockets
Apache ActiveMQ Artemis also support STOMP over [Web
Sockets](https://html.spec.whatwg.org/multipage/web-sockets.html). Modern web
browsers which support Web Sockets can send and receive STOMP messages.
STOMP over Web Sockets is supported via the normal STOMP acceptor:
```xml
<acceptor name="stomp-ws-acceptor">tcp://localhost:61614?protocols=STOMP</acceptor>
```
With this configuration, Apache ActiveMQ Artemis will accept STOMP connections
over Web Sockets on the port `61614`. Web browsers can then connect to
`ws://<server>:61614` using a Web Socket to send and receive STOMP messages.
A companion JavaScript library to ease client-side development is available
from [GitHub](https://github.com/jmesnil/stomp-websocket) (please see its
[documentation](http://jmesnil.net/stomp-websocket/doc/) for a complete
description).
The payload length of Web Socket frames can vary between client
implementations. By default the broker will accept frames with a payload length
of 65,536. If the client needs to send payloads longer than this in a single
frame this length can be adjusted by using the `stompMaxFramePayloadLength` URL
parameter on the acceptor.
The `stomp-websockets` example shows how to configure an Apache ActiveMQ
Artemis broker to have web browsers and Java applications exchange messages.

View File

@ -11,7 +11,7 @@
Somejava s = new SomeJava();
```
> **Note**
> **Note:**
>
> This is a Note

View File

@ -1,216 +0,0 @@
# Tools
You can use the artemis cli interface to execute data maintenance tools:
This is a list of sub-commands available
Name | Description
:--- | :---
exp | Export the message data using a special and independent XML format
imp | Imports the journal to a running broker using the output from exp
data | Prints a report about journal records and summary of existent records, as well a report on paging
encode | shows an internal format of the journal encoded to String
decode | imports the internal journal format from encode
You can use the help at the tool for more information on how to execute each of the tools. For example:
```
$ ./artemis help data print
NAME
artemis data print - Print data records information (WARNING: don't use
while a production server is running)
SYNOPSIS
artemis data print [--bindings <binding>] [--journal <journal>]
[--paging <paging>]
OPTIONS
--bindings <binding>
The folder used for bindings (default ../data/bindings)
--journal <journal>
The folder used for messages journal (default ../data/journal)
--paging <paging>
The folder used for paging (default ../data/paging)
```
For a full list of data tools commands available use:
```
NAME
artemis data - data tools group
(print|imp|exp|encode|decode|compact) (example ./artemis data print)
SYNOPSIS
artemis data
artemis data compact [--broker <brokerConfig>] [--verbose]
[--paging <paging>] [--journal <journal>]
[--large-messages <largeMessges>] [--bindings <binding>]
artemis data decode [--broker <brokerConfig>] [--suffix <suffix>]
[--verbose] [--paging <paging>] [--prefix <prefix>] [--file-size <size>]
[--directory <directory>] --input <input> [--journal <journal>]
[--large-messages <largeMessges>] [--bindings <binding>]
artemis data encode [--directory <directory>] [--broker <brokerConfig>]
[--suffix <suffix>] [--verbose] [--paging <paging>] [--prefix <prefix>]
[--file-size <size>] [--journal <journal>]
[--large-messages <largeMessges>] [--bindings <binding>]
artemis data exp [--broker <brokerConfig>] [--verbose]
[--paging <paging>] [--journal <journal>]
[--large-messages <largeMessges>] [--bindings <binding>]
artemis data imp [--host <host>] [--verbose] [--port <port>]
[--password <password>] [--transaction] --input <input> [--user <user>]
artemis data print [--broker <brokerConfig>] [--verbose]
[--paging <paging>] [--journal <journal>]
[--large-messages <largeMessges>] [--bindings <binding>]
COMMANDS
With no arguments, Display help information
print
Print data records information (WARNING: don't use while a
production server is running)
With --broker option, This would override the broker configuration
from the bootstrap
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --bindings option, The folder used for bindings (default from
broker.xml)
exp
Export all message-data using an XML that could be interpreted by
any system.
With --broker option, This would override the broker configuration
from the bootstrap
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --bindings option, The folder used for bindings (default from
broker.xml)
imp
Import all message-data using an XML that could be interpreted by
any system.
With --host option, The host used to import the data (default
localhost)
With --verbose option, Adds more information on the execution
With --port option, The port used to import the data (default 61616)
With --password option, User name used to import the data. (default
null)
With --transaction option, If this is set to true you will need a
whole transaction to commit at the end. (default false)
With --input option, The input file name (default=exp.dmp)
With --user option, User name used to import the data. (default
null)
decode
Decode a journal's internal format into a new journal set of files
With --broker option, This would override the broker configuration
from the bootstrap
With --suffix option, The journal suffix (default amq)
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --prefix option, The journal prefix (default activemq-data)
With --file-size option, The journal size (default 10485760)
With --directory option, The journal folder (default journal folder
from broker.xml)
With --input option, The input file name (default=exp.dmp)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --bindings option, The folder used for bindings (default from
broker.xml)
encode
Encode a set of journal files into an internal encoded data format
With --directory option, The journal folder (default the journal
folder from broker.xml)
With --broker option, This would override the broker configuration
from the bootstrap
With --suffix option, The journal suffix (default amq)
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --prefix option, The journal prefix (default activemq-data)
With --file-size option, The journal size (default 10485760)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --bindings option, The folder used for bindings (default from
broker.xml)
compact
Compacts the journal of a non running server
With --broker option, This would override the broker configuration
from the bootstrap
With --verbose option, Adds more information on the execution
With --paging option, The folder used for paging (default from
broker.xml)
With --journal option, The folder used for messages journal (default
from broker.xml)
With --large-messages option, The folder used for large-messages
(default from broker.xml)
With --bindings option, The folder used for bindings (default from
broker.xml)
```

View File

@ -8,18 +8,18 @@ in the queue indefinitely, clogging the system.
There are 2 ways to deal with these undelivered messages:
- Delayed redelivery.
- Delayed redelivery.
It is possible to delay messages redelivery. This gives the client some
time to recover from any transient failures and to prevent overloading
its network or CPU resources.
It is possible to delay messages redelivery. This gives the client some
time to recover from any transient failures and to prevent overloading
its network or CPU resources.
- Dead Letter Address.
- Dead Letter Address.
It is also possible to configure a dead letter address so that after
a specified number of unsuccessful deliveries, messages are removed
from their queue and sent to the dead letter address. These messages
will not be delivered again from this queue.
It is also possible to configure a dead letter address so that after
a specified number of unsuccessful deliveries, messages are removed
from their queue and sent to the dead letter address. These messages
will not be delivered again from this queue.
Both options can be combined for maximum flexibility.
@ -130,15 +130,15 @@ set of addresses (see [Understanding the Wildcard Syntax](wildcard-syntax.md)).
Dead letter messages which are consumed from a dead letter address have
the following properties:
- `_AMQ_ORIG_ADDRESS`
- `_AMQ_ORIG_ADDRESS`
a String property containing the *original address* of the dead
letter message
a String property containing the *original address* of the dead
letter message
- `_AMQ_ORIG_QUEUE`
- `_AMQ_ORIG_QUEUE`
a String property containing the *original queue* of the dead letter
message
a String property containing the *original queue* of the dead letter
message
### Example

View File

@ -1,29 +1,26 @@
# Unit Testing
The package ```artemis-junit``` provides tools to facilitate how to run Artemis resources inside Junit Tests.
The package `artemis-junit` provides tools to facilitate how to run Artemis resources inside JUnit Tests.
These are provided as junit rules and can make it easier to embed Messaging functionality on your tests.
These are provided as JUnit "rules" and can make it easier to embed messaging functionality on your tests.
## Example
### Import this on your pom.xml
```xml
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>artemis-junit</artifactId>
<!-- replace this for the version you are using -->
<version>1.5.0</version>
<scope>test</scope>
<groupId>org.apache.activemq</groupId>
<artifactId>artemis-junit</artifactId>
<!-- replace this for the version you are using -->
<version>2.5.0</version>
<scope>test</scope>
</dependency>
```
### Declare a rule on your JUnit Test
```java
import org.apache.activemq.artemis.junit.EmbeddedJMSResource;
import org.junit.Rule;
@ -44,34 +41,31 @@ public class MyTest {
This will start a server that will be available for your test:
```
ain] 17:00:16,644 INFO [org.apache.activemq.artemis.core.server] AMQ221000: live Message Broker is starting with configuration Broker Configuration (clustered=false,journalDirectory=data/journal,bindingsDirectory=data/bindings,largeMessagesDirectory=data/largemessages,pagingDirectory=data/paging)
[main] 17:00:16,644 INFO [org.apache.activemq.artemis.core.server] AMQ221000: live Message Broker is starting with configuration Broker Configuration (clustered=false,journalDirectory=data/journal,bindingsDirectory=data/bindings,largeMessagesDirectory=data/largemessages,pagingDirectory=data/paging)
[main] 17:00:16,666 INFO [org.apache.activemq.artemis.core.server] AMQ221045: libaio is not available, switching the configuration into NIO
[main] 17:00:16,688 INFO [org.apache.activemq.artemis.core.server] AMQ221043: Protocol module found: [artemis-server]. Adding protocol support for: CORE
[main] 17:00:16,801 INFO [org.apache.activemq.artemis.core.server] AMQ221007: Server is now live
[main] 17:00:16,801 INFO [org.apache.activemq.artemis.core.server] AMQ221001: Apache ActiveMQ Artemis Message Broker version 1.5.0-SNAPSHOT [embedded-jms-server, nodeID=39e78380-842c-11e6-9e43-f45c8992f3c7]
[main] 17:00:16,891 INFO [org.apache.activemq.artemis.core.server] AMQ221002: Apache ActiveMQ Artemis Message Broker version 1.5.0-SNAPSHOT [39e78380-842c-11e6-9e43-f45c8992f3c7] stopped, uptime 0.272 seconds
```
### Ordering rules
This is actually a Junit feature, but this could be helpful on pre-determining the order on which rules are executed.
This is actually a JUnit feature, but this could be helpful on pre-determining the order on which rules are executed.
```java
ActiveMQDynamicProducerResource producer = new ActiveMQDynamicProducerResource(server.getVmURL());
@Rule
public RuleChain ruleChain = RuleChain.outerRule(new ThreadLeakCheckRule()).around(server).around(producer);
ActiveMQDynamicProducerResource producer = new ActiveMQDynamicProducerResource(server.getVmURL());
@Rule
public RuleChain ruleChain = RuleChain.outerRule(new ThreadLeakCheckRule()).around(server).around(producer);
```
### Available Rules
Name | Description
:--- | :---
EmbeddedActiveMQResource | It will run a Server, without the JMS manager
EmbeddedJMSResource | It will run a Server, including the JMS Manager
ActiveMQConsumerResource | It will automate the creation of a consumer
ActiveMQProducerResource | It will automate the creation of a producer
ThreadLeakCheckRule | It will check that all threads have been finished after the test is finished
--- | ---
EmbeddedActiveMQResource | Run a Server, without the JMS manager
EmbeddedJMSResource | Run a Server, including the JMS Manager
ActiveMQConsumerResource | Automate the creation of a consumer
ActiveMQProducerResource | Automate the creation of a producer
ThreadLeakCheckRule | Check that all threads have been finished after the test is finished

View File

@ -1,38 +1,41 @@
# Upgrading the Broker
Apache ActiveMQ 5.x (and previous versions) is runnable out of the box by executing
the command: `./bin/activemq run`. The ActiveMQ Artemis broker follows a different
paradigm where the project distribution serves as the broker "home" and one or more
broker "instances" are created which reference the "home" for resources (e.g. jar files)
which can be safely shared between broker instances. Therefore, an instance of the broker
must be created before it can be run. This may seems like an overhead at first
glance, but it becomes very practical when updating to a new Artemis version for example.
Apache ActiveMQ 5.x (and previous versions) is runnable out of the box by
executing the command: `./bin/activemq run`. The ActiveMQ Artemis broker
follows a different paradigm where the project distribution serves as the
broker "home" and one or more broker "instances" are created which reference
the "home" for resources (e.g. jar files) which can be safely shared between
broker instances. Therefore, an instance of the broker must be created before
it can be run. This may seem like an overhead at first glance, but it becomes
very practical when updating to a new Artemis version for example.
To create an Artemis broker instance navigate into the Artemis home folder and run:
`./bin/artemis create /path/to/myBrokerInstance` on the command line.
To create an Artemis broker instance navigate into the Artemis home folder and
run: `./bin/artemis create /path/to/myBrokerInstance` on the command line.
> **Note**
Because of this separation it's very easy to upgrade Artemis in most cases.
> **Note:**
>
> It's recommended to choose a folder different than the on where Apache Artemis was
> downloaded. This separation allows you run multiple broker instances with the same
> Artemis "home" for example. It also simplifies updating to newer versions of Artemis.
Because of this separation it's very easy to upgrade Artemis in most cases.
> It's recommended to choose a folder different than the one where Apache
> Artemis was downloaded. This separation allows you run multiple broker
> instances with the same Artemis "home" for example. It also simplifies
> updating to newer versions of Artemis.
## General Upgrade Procedure
Upgrading may require some specific steps noted in the [versions](versions.md), but the
general process is as follows:
Upgrading may require some specific steps noted in the [versions](versions.md),
but the general process is as follows:
1. Navigate to the `etc` folder of the broker instance that's being upgraded
1. Open `artemis.profile` (`artemis.profile.cmd` on Windows). It contains a property
which is relevant for the upgrade:
1. Open `artemis.profile` (`artemis.profile.cmd` on Windows). It contains a
property which is relevant for the upgrade:
```
ARTEMIS_HOME='/path/to/apache-artemis-version'
```
The `ARTEMIS_HOME` property is used to link the instance with the home.
_In most cases_ the instance can be upgraded to a newer version simply by changing the
value of this property to the location of the new broker home. Please refer to the
aforementioned [versions](versions.md) document for additional upgrade steps (if required).
The `ARTEMIS_HOME` property is used to link the instance with the home. _In
most cases_ the instance can be upgraded to a newer version simply by changing
the value of this property to the location of the new broker home. Please refer
to the aforementioned [versions](versions.md) document for additional upgrade
steps (if required).

View File

@ -1,74 +0,0 @@
# Using AMQP
Apache ActiveMQ Artemis is also a pure AMQP 1.0 broker, with a highly performant and feature-complete protocol manager for AMQP.
You can use *any* AMQP 1.0 compatible clients.
A short list includes:
- qpid clients at the [qpid project](https://qpid.apache.org/download.html)
- [.NET Clients](https://blogs.apache.org/activemq/entry/using-net-libraries-with-activemq)
- [Javascript NodeJS](https://github.com/noodlefrenzy/node-amqp10)
- [Java Script RHEA](https://github.com/grs/rhea)
... and many others.
# Message Conversions
The broker will not perform any message conversion to any other protocols when sending AMQP and receiving AMQP.
However if you intend your message to be received on an AMQP JMS client, you must follow the JMS Mapping convention:
- [JMS Mapping Conventions](https://www.oasis-open.org/committees/download.php/53086/amqp-bindmap-jms-v1.0-wd05.pdf)
If you send a body type that is not recognized by this specification the conversion between AMQP and any other protocol will make it a Binary Message.
So, make sure you follow these conventions if you intend to cross protocols or languages. Especially on the message body.
A compatibility setting, allows aligning the naming convention of AMQP queues (JMS Durable and Shared Subscriptions) with CORE.
For backwards compatibility reasons, you need to explicitly enable this via broker configuration:
* amqp-use-core-subscription-naming
* true - use queue naming convention that is aligned with CORE.
* false (DEFAULT) - use older naming convention.
# Example
We have a few examples as part of the Artemis distribution:
- .NET:
* ./examples/protocols/amqp/dotnet
- ProtonCPP
* ./examples/protocols/amqp/proton-cpp
- Ruby
* ./examples/protocols/amqp/proton-ruby
- Java (Using the qpid JMS Client)
* ./examples/protocols/amqp/queue
- Interceptors
* ./examples/features/standard/broker-plugin
# Intercepting and changing messages
We don't recommend changing messages at the server's side for a few reasons:
- AMQPMessages are meant to be immutable
- The message won't be the original message the user sent
- AMQP has the possibility of signing messages. The signature would be broken.
- For performance reasons. We try not to re-encode (or even decode) messages.
If, regardless of these recommendations, you still need to intercept and change AMQP messages, look at the example under ./examples/features/standard/broker-plugin.
This example will send AMQP Message and modify properties before they reach the journals and are sent to the consumers.

View File

@ -1,212 +0,0 @@
# Using Core
Apache ActiveMQ Artemis core is a completely JMS-agnostic messaging system with its own
API. We call this the *core API*.
If you don't want to use JMS or other protocols you can use the core API directly. The core
API provides all the functionality of JMS but without much of the
complexity. It also provides features that are not available using JMS.
## Core Messaging Concepts
Some of the core messaging concepts are similar to JMS concepts, but
core messaging concepts differ in some ways. In general the core
messaging API is simpler than the JMS API, since we remove distinctions
between queues, topics and subscriptions. We'll discuss each of the
major core messaging concepts in turn, but to see the API in detail,
please consult the Javadoc.
### Message
- A message is the unit of data which is sent between clients and
servers.
- A message has a body which is a buffer containing convenient methods
for reading and writing data into it.
- A message has a set of properties which are key-value pairs. Each
property key is a string and property values can be of type integer,
long, short, byte, byte[], String, double, float or boolean.
- A message has an *address* it is being sent to. When the message
arrives on the server it is routed to any queues that are bound to
the address - if the queues are bound with any filter, the message
will only be routed to that queue if the filter matches. An address
may have many queues bound to it or even none. There may also be
entities other than queues, like *diverts* bound to addresses.
- Messages can be either durable or non durable. Durable messages in a
durable queue will survive a server crash or restart. Non durable
messages will never survive a server crash or restart.
- Messages can be specified with a priority value between 0 and 9. 0
represents the lowest priority and 9 represents the highest.
Apache ActiveMQ Artemis will attempt to deliver higher priority messages before
lower priority ones.
- Messages can be specified with an optional expiry time. Apache ActiveMQ Artemis
will not deliver messages after its expiry time has been exceeded.
- Messages also have an optional timestamp which represents the time
the message was sent.
- Apache ActiveMQ Artemis also supports the sending/consuming of very large messages
much larger than can fit in available RAM at any one time.
### Address
A server maintains a mapping between an address and a set of queues.
Zero or more queues can be bound to a single address. Each queue can be
bound with an optional message filter. When a message is routed, it is
routed to the set of queues bound to the message's address. If any of
the queues are bound with a filter expression, then the message will
only be routed to the subset of bound queues which match that filter
expression.
Other entities, such as *diverts* can also be bound to an address and
messages will also be routed there.
> **Note**
>
> In core, there is no concept of a Topic, Topic is a JMS only term.
> Instead, in core, we just deal with *addresses* and *queues*.
>
> For example, a JMS topic would be implemented by a single address to
> which many queues are bound. Each queue represents a subscription of
> the topic. A JMS Queue would be implemented as a single address to
> which one queue is bound - that queue represents the JMS queue.
### Queue
Queues can be durable, meaning the messages they contain survive a
server crash or restart, as long as the messages in them are durable.
Non durable queues do not survive a server restart or crash even if the
messages they contain are durable.
Queues can also be temporary, meaning they are automatically deleted
when the client connection is closed, if they are not explicitly deleted
before that.
Queues can be bound with an optional filter expression. If a filter
expression is supplied then the server will only route messages that
match that filter expression to any queues bound to the address.
Many queues can be bound to a single address. A particular queue is only
bound to a maximum of one address.
### ServerLocator
Clients use `ServerLocator` instances to create `ClientSessionFactory`
instances. `ServerLocator` instances are used to locate servers and
create connections to them.
In JMS terms think of a `ServerLocator` in the same way you would a JMS
Connection Factory.
`ServerLocator` instances are created using the `ActiveMQClient` factory
class.
### ClientSessionFactory
Clients use `ClientSessionFactory` instances to create `ClientSession`
instances. `ClientSessionFactory` instances are basically the connection
to a server.
In JMS terms think of them as JMS Connections.
`ClientSessionFactory` instances are created using the `ServerLocator`
class.
### ClientSession
A client uses a ClientSession for consuming and producing messages and
for grouping them in transactions. ClientSession instances can support
both transactional and non transactional semantics and also provide an
`XAResource` interface so messaging operations can be performed as part
of a
[JTA](http://www.oracle.com/technetwork/java/javaee/tech/jta-138684.html)
transaction.
ClientSession instances group ClientConsumers and ClientProducers.
ClientSession instances can be registered with an optional
`SendAcknowledgementHandler`. This allows your client code to be
notified asynchronously when sent messages have successfully reached the
server. This unique Apache ActiveMQ Artemis feature, allows you to have full guarantees
that sent messages have reached the server without having to block on
each message sent until a response is received. Blocking on each
messages sent is costly since it requires a network round trip for each
message sent. By not blocking and receiving send acknowledgements
asynchronously you can create true end to end asynchronous systems which
is not possible using the standard JMS API. For more information on this
advanced feature please see the section [Guarantees of sends and commits](send-guarantees.md).
### ClientConsumer
Clients use `ClientConsumer` instances to consume messages from a queue.
Core Messaging supports both synchronous and asynchronous message
consumption semantics. `ClientConsumer` instances can be configured with
an optional filter expression and will only consume messages which match
that expression.
### ClientProducer
Clients create `ClientProducer` instances on `ClientSession` instances
so they can send messages. ClientProducer instances can specify an
address to which all sent messages are routed, or they can have no
specified address, and the address is specified at send time for the
message.
> **Warning**
>
> Please note that ClientSession, ClientProducer and ClientConsumer
> instances are *designed to be re-used*.
>
> It's an anti-pattern to create new ClientSession, ClientProducer and
> ClientConsumer instances for each message you produce or consume. If
> you do this, your application will perform very poorly. This is
> discussed further in the section on performance tuning [Performance Tuning](perf-tuning.md).
## A simple example of using Core
Here's a very simple program using the core messaging API to send and
receive a message. Logically it's comprised of two sections: firstly
setting up the producer to write a message to an *address*, and
secondly, creating a *queue* for the consumer, creating the consumer and
*starting* it.
```java
ServerLocator locator = ActiveMQClient.createServerLocatorWithoutHA(new TransportConfiguration(
InVMConnectorFactory.class.getName()));
// In this simple example, we just use one session for both producing and receiving
ClientSessionFactory factory = locator.createClientSessionFactory();
ClientSession session = factory.createSession();
// A producer is associated with an address ...
ClientProducer producer = session.createProducer("example");
ClientMessage message = session.createMessage(true);
message.getBodyBuffer().writeString("Hello");
// We need a queue attached to the address ...
session.createQueue("example", "example", true);
// And a consumer attached to the queue ...
ClientConsumer consumer = session.createConsumer("example");
// Once we have a queue, we can send the message ...
producer.send(message);
// We need to start the session before we can -receive- messages ...
session.start();
ClientMessage msgReceived = consumer.receive();
System.out.println("message = " + msgReceived.getBodyBuffer().readString());
session.close();
```

View File

@ -1,186 +1,193 @@
# Using JMS
Although Apache ActiveMQ Artemis provides a JMS agnostic messaging API, many users will
be more comfortable using JMS.
Although Apache ActiveMQ Artemis provides a JMS agnostic messaging API, many
users will be more comfortable using JMS.
JMS is a very popular API standard for messaging, and most messaging
systems provide a JMS API. If you are completely new to JMS we suggest
you follow the [Oracle JMS tutorial](https://docs.oracle.com/javaee/7/tutorial/partmessaging.htm) -
a full JMS tutorial is out of scope for this guide.
JMS is a very popular API standard for messaging, and most messaging systems
provide a JMS API. If you are completely new to JMS we suggest you follow the
[Oracle JMS
tutorial](https://docs.oracle.com/javaee/7/tutorial/partmessaging.htm) - a full
JMS tutorial is out of scope for this guide.
Apache ActiveMQ Artemis also ships with a wide range of examples, many of which
demonstrate JMS API usage. A good place to start would be to play around
with the simple JMS Queue and Topic example, but we also provide
examples for many other parts of the JMS API. A full description of the
examples is available in [Examples](examples.md).
demonstrate JMS API usage. A good place to start would be to play around with
the simple JMS Queue and Topic example, but we also provide examples for many
other parts of the JMS API. A full description of the examples is available in
[Examples](examples.md).
In this section we'll go through the main steps in configuring the
server for JMS and creating a simple JMS program. We'll also show how to
configure and use JNDI, and also how to use JMS with Apache ActiveMQ Artemis without
using any JNDI.
In this section we'll go through the main steps in configuring the server for
JMS and creating a simple JMS program. We'll also show how to configure and use
JNDI, and also how to use JMS with Apache ActiveMQ Artemis without using any
JNDI.
## A simple ordering system
For this chapter we're going to use a very simple ordering system as our
example. It is a somewhat contrived example because of its extreme
simplicity, but it serves to demonstrate the very basics of setting up
and using JMS.
example. It is a somewhat contrived example because of its extreme simplicity,
but it serves to demonstrate the very basics of setting up and using JMS.
We will have a single JMS Queue called `OrderQueue`, and we will have a
single `MessageProducer` sending an order message to the queue and a
single `MessageConsumer` consuming the order message from the queue.
We will have a single JMS Queue called `OrderQueue`, and we will have a single
`MessageProducer` sending an order message to the queue and a single
`MessageConsumer` consuming the order message from the queue.
The queue will be a `durable` queue, i.e. it will survive a server
restart or crash. We also want to pre-deploy the queue, i.e. specify the
queue in the server configuration so it is created automatically
without us having to explicitly create it from the client.
The queue will be a `durable` queue, i.e. it will survive a server restart or
crash. We also want to pre-deploy the queue, i.e. specify the queue in the
server configuration so it is created automatically without us having to
explicitly create it from the client.
## JNDI Configuration
## JNDI
The JMS specification establishes the convention that *administered
objects* (i.e. JMS queue, topic and connection factory instances) are
made available via the JNDI API. Brokers are free to implement JNDI as
they see fit assuming the implementation fits the API. Apache ActiveMQ Artemis does not
have a JNDI server. Rather, it uses a client-side JNDI implementation
that relies on special properties set in the environment to construct
the appropriate JMS objects. In other words, no objects are stored in
JNDI on the Apache ActiveMQ Artemis server, instead they are simply instantiated on the
client based on the provided configuration. Let's look at the different
kinds of administered objects and how to configure them.
The JMS specification establishes the convention that *administered objects*
(i.e. JMS queue, topic and connection factory instances) are made available via
the JNDI API. Brokers are free to implement JNDI as they see fit assuming the
implementation fits the API. Apache ActiveMQ Artemis does not have a JNDI
server. Rather, it uses a client-side JNDI implementation that relies on
special properties set in the environment to construct the appropriate JMS
objects. In other words, no objects are stored in JNDI on the Apache ActiveMQ
Artemis server, instead they are simply instantiated on the client based on the
provided configuration. Let's look at the different kinds of administered
objects and how to configure them.
> **Note**
> **Note:**
>
> The following configuration properties *are strictly required when
> Apache ActiveMQ Artemis is running in stand-alone mode*. When Apache ActiveMQ Artemis is integrated
> to an application server (e.g. Wildfly) the application server itself
> will almost certainly provide a JNDI client with its own properties.
> The following configuration properties *are strictly required when Apache
> ActiveMQ Artemis is running in stand-alone mode*. When Apache ActiveMQ
> Artemis is integrated to an application server (e.g. Wildfly) the application
> server itself will almost certainly provide a JNDI client with its own
> properties.
### ConnectionFactory JNDI
A JMS connection factory is used by the client to make connections to
the server. It knows the location of the server it is connecting to, as
well as many other configuration parameters.
A JMS connection factory is used by the client to make connections to the
server. It knows the location of the server it is connecting to, as well as
many other configuration parameters.
Here's a simple example of the JNDI context environment for a client
looking up a connection factory to access an *embedded* instance of
Apache ActiveMQ Artemis:
Here's a simple example of the JNDI context environment for a client looking up
a connection factory to access an *embedded* instance of Apache ActiveMQ
Artemis:
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.invmConnectionFactory=vm://0
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.invmConnectionFactory=vm://0
```
In this instance we have created a connection factory that is bound to
`invmConnectionFactory`, any entry with prefix `connectionFactory.` will
create a connection factory.
`invmConnectionFactory`, any entry with prefix `connectionFactory.` will create
a connection factory.
In certain situations there could be multiple server instances running
within a particular JVM. In that situation each server would typically
have an InVM acceptor with a unique server-ID. A client using JMS and
JNDI can account for this by specifying a connection factory for each
server, like so:
In certain situations there could be multiple server instances running within a
particular JVM. In that situation each server would typically have an InVM
acceptor with a unique server-ID. A client using JMS and JNDI can account for
this by specifying a connection factory for each server, like so:
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.invmConnectionFactory0=vm://0
connectionFactory.invmConnectionFactory1=vm://1
connectionFactory.invmConnectionFactory2=vm://2
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.invmConnectionFactory0=vm://0
connectionFactory.invmConnectionFactory1=vm://1
connectionFactory.invmConnectionFactory2=vm://2
```
Here is a list of all the supported URL schemes:
- `vm`
- `vm`
- `tcp`
- `udp`
- `jgroups`
- `tcp`
Most clients won't be connecting to an embedded broker. Clients will most
commonly connect across a network to a remote broker. Here's a simple example of a
client configuring a connection factory to connect to a remote broker running
on myhost:5445:
- `udp`
- `jgroups`
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.ConnectionFactory=tcp://myhost:5445
```
Most clients won't be connecting to an embedded broker. Clients will
most commonly connect across a network to a remote broker. Here's a simple
example of a client configuring a connection factory to connect to a
remote broker running on myhost:5445:
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.ConnectionFactory=tcp://myhost:5445
In the example above the client is using the `tcp` scheme for the
provider URL. A client may also specify multiple comma-delimited
host:port combinations in the URL (e.g.
`(tcp://remote-host1:5445,remote-host2:5445)`). Whether there is one or
many host:port combinations in the URL they are treated as the *initial
In the example above the client is using the `tcp` scheme for the provider URL.
A client may also specify multiple comma-delimited host:port combinations in
the URL (e.g. `(tcp://remote-host1:5445,remote-host2:5445)`). Whether there is
one or many host:port combinations in the URL they are treated as the *initial
connector(s)* for the underlying connection.
The `udp` scheme is also supported which should use a host:port
combination that matches the `group-address` and `group-port` from the
corresponding `broadcast-group` configured on the ActiveMQ Artemis server(s).
The `udp` scheme is also supported which should use a host:port combination
that matches the `group-address` and `group-port` from the corresponding
`broadcast-group` configured on the ActiveMQ Artemis server(s).
Each scheme has a specific set of properties which can be set using the
traditional URL query string format (e.g.
`scheme://host:port?key1=value1&key2=value2`) to customize the
underlying transport mechanism. For example, if a client wanted to
connect to a remote server using TCP and SSL it would create a connection
factory like so, `tcp://remote-host:5445?ssl-enabled=true`.
`scheme://host:port?key1=value1&key2=value2`) to customize the underlying
transport mechanism. For example, if a client wanted to connect to a remote
server using TCP and SSL it would create a connection factory like so,
`tcp://remote-host:5445?ssl-enabled=true`.
All the properties available for the `tcp` scheme are described in [the
documentation regarding the Netty
transport](configuring-transports.md#configuring-the-netty-transport).
Note if you are using the `tcp` scheme and multiple addresses then a query
can be applied to all the url's or just to an individual connector, so where
you have
Note if you are using the `tcp` scheme and multiple addresses then a query can
be applied to all the url's or just to an individual connector, so where you
have
- `(tcp://remote-host1:5445?httpEnabled=true,remote-host2:5445?httpEnabled=true)?clientID=1234`
- `(tcp://remote-host1:5445?httpEnabled=true,remote-host2:5445?httpEnabled=true)?clientID=1234`
then the `httpEnabled` property is only set on the individual connectors whereas the `clientId`
is set on the actual connection factory. Any connector specific properties set on the whole
URI will be applied to all the connectors.
then the `httpEnabled` property is only set on the individual connectors
whereas the `clientId` is set on the actual connection factory. Any connector
specific properties set on the whole URI will be applied to all the connectors.
The `udp` scheme supports 4 properties:
- `localAddress` - If you are running with multiple network
interfaces on the same machine, you may want to specify that the
discovery group listens only on a specific interface. To do this
you can specify the interface address with this parameter.
- `localAddress` - If you are running with multiple network
interfaces on the same machine, you may want to specify that the
discovery group listens only on a specific interface. To do this
you can specify the interface address with this parameter.
- `localPort` - If you want to specify a local port to which the
datagram socket is bound you can specify it here. Normally you would
just use the default value of -1 which signifies that an anonymous
port should be used. This parameter is always specified in
conjunction with `localAddress`.
- `localPort` - If you want to specify a local port to which the
datagram socket is bound you can specify it here. Normally you would
just use the default value of -1 which signifies that an anonymous
port should be used. This parameter is always specified in
conjunction with `localAddress`.
- `refreshTimeout` - This is the period the discovery group waits
after receiving the last broadcast from a particular server before
removing that servers connector pair entry from its list. You would
normally set this to a value significantly higher than the
broadcast-period on the broadcast group otherwise servers might
intermittently disappear from the list even though they are still
broadcasting due to slight differences in timing. This parameter is
optional, the default value is 10000 milliseconds (10 seconds).
- `refreshTimeout` - This is the period the discovery group waits after
receiving the last broadcast from a particular server before removing that
servers connector pair entry from its list. You would normally set this to a
value significantly higher than the broadcast-period on the broadcast group
otherwise servers might intermittently disappear from the list even though they
are still broadcasting due to slight differences in timing. This parameter is
optional, the default value is 10000 milliseconds (10 seconds).
- `discoveryInitialWaitTimeout` - If the connection factory is used
immediately after creation then it may not have had enough time to
receive broadcasts from all the nodes in the cluster. On first
usage, the connection factory will make sure it waits this long
since creation before creating the first connection. The default
value for this parameter is 10000 milliseconds.
- `discoveryInitialWaitTimeout` - If the connection factory is used immediately
after creation then it may not have had enough time to receive broadcasts
from all the nodes in the cluster. On first usage, the connection factory will
make sure it waits this long since creation before creating the first
connection. The default value for this parameter is 10000 milliseconds.
Lastly, the `jgroups` scheme is supported which provides an alternative
to the `udp` scheme for server discovery. The URL pattern is either
Lastly, the `jgroups` scheme is supported which provides an alternative to the
`udp` scheme for server discovery. The URL pattern is either
`jgroups://channelName?file=jgroups-xml-conf-filename`
where `jgroups-xml-conf-filename` refers to an XML file on the classpath
that contains the JGroups configuration or it can be
`jgroups://channelName?properties=some-jgroups-properties`. In both instances the
`channelName` is the name given to the jgroups channel created.
where `jgroups-xml-conf-filename` refers to an XML file on the classpath that
contains the JGroups configuration or it can be
`jgroups://channelName?properties=some-jgroups-properties`. In both instances
the `channelName` is the name given to the jgroups channel created.
The `refreshTimeout` and `discoveryInitialWaitTimeout` properties
are supported just like with `udp`.
The `refreshTimeout` and `discoveryInitialWaitTimeout` properties are supported
just like with `udp`.
The default type for the default connection factory is of type `javax.jms.ConnectionFactory`.
This can be changed by setting the type like so
The default type for the default connection factory is of type
`javax.jms.ConnectionFactory`. This can be changed by setting the type like so
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
java.naming.provider.url=tcp://localhost:5445?type=CF
In this example it is still set to the default, below shows a list of types that can be set.
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
java.naming.provider.url=tcp://localhost:5445?type=CF
```
In this example it is still set to the default, below shows a list of types
that can be set.
#### Configuration for Connection Factory Types
@ -195,42 +202,46 @@ TOPIC_XA_CF | javax.jms.XATopicConnectionFactory
### Destination JNDI
JMS destinations are also typically looked up via JNDI. As with
connection factories, destinations can be configured using special
properties in the JNDI context environment. The property *name* should
follow the pattern: `queue.<jndi-binding>` or `topic.<jndi-binding>`.
The property *value* should be the name of the queue hosted by the
Apache ActiveMQ Artemis server. For example, if the server had a JMS queue configured
like so:
JMS destinations are also typically looked up via JNDI. As with connection
factories, destinations can be configured using special properties in the JNDI
context environment. The property *name* should follow the pattern:
`queue.<jndi-binding>` or `topic.<jndi-binding>`. The property *value* should
be the name of the queue hosted by the Apache ActiveMQ Artemis server. For
example, if the server had a JMS queue configured like so:
```xml
<queue name="OrderQueue"/>
<address name="OrderQueue">
<queue name="OrderQueue"/>
</address>
```
And if the client wanted to bind this queue to "queues/OrderQueue" then
the JNDI properties would be configured like so:
And if the client wanted to bind this queue to "queues/OrderQueue" then the
JNDI properties would be configured like so:
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
java.naming.provider.url=tcp://myhost:5445
queue.queues/OrderQueue=OrderQueue
```properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
java.naming.provider.url=tcp://myhost:5445
queue.queues/OrderQueue=OrderQueue
```
It is also possible to look-up JMS destinations which haven't been
configured explicitly in the JNDI context environment. This is possible
using `dynamicQueues/` or `dynamicTopics/` in the look-up string. For
example, if the client wanted to look-up the aforementioned "OrderQueue"
it could do so simply by using the string "dynamicQueues/OrderQueue".
Note, the text that follows `dynamicQueues/` or `dynamicTopics/` must
correspond *exactly* to the name of the destination on the server.
It is also possible to look-up JMS destinations which haven't been configured
explicitly in the JNDI context environment. This is possible using
`dynamicQueues/` or `dynamicTopics/` in the look-up string. For example, if the
client wanted to look-up the aforementioned "OrderQueue" it could do so simply
by using the string "dynamicQueues/OrderQueue". Note, the text that follows
`dynamicQueues/` or `dynamicTopics/` must correspond *exactly* to the name of
the destination on the server.
### The code
Here's the code for the example:
First we'll create a JNDI initial context from which to lookup our JMS
objects. If the above properties are set in `jndi.properties` and it is
on the classpath then any new, empty `InitialContext` will be
initialized using those properties:
```java
First we'll create a JNDI initial context from which to lookup our JMS objects.
If the above properties are set in `jndi.properties` and it is on the classpath
then any new, empty `InitialContext` will be initialized using those
properties:
```java
InitialContext ic = new InitialContext();
//Now we'll look up the connection factory from which we can create
@ -280,35 +291,36 @@ see the examples directory in the distribution.
> **Warning**
>
> Please note that JMS connections, sessions, producers and consumers
> are *designed to be re-used*.
> Please note that JMS connections, sessions, producers and consumers are
> *designed to be re-used*.
>
> It is an anti-pattern to create new connections, sessions, producers
> and consumers for each message you produce or consume. If you do this,
> your application will perform very poorly. This is discussed further
> in the section on performance tuning [Performance Tuning](perf-tuning.md).
> It is an anti-pattern to create new connections, sessions, producers and
> consumers for each message you produce or consume. If you do this, your
> application will perform very poorly. This is discussed further in the
> section on performance tuning [Performance Tuning](perf-tuning.md).
## Directly instantiating JMS Resources without using JNDI
Although it is a very common JMS usage pattern to lookup JMS
*Administered Objects* (that's JMS Queue, Topic and ConnectionFactory
instances) from JNDI, in some cases you just think "Why do I need JNDI?
Why can't I just instantiate these objects directly?"
Although it is a very common JMS usage pattern to lookup JMS *Administered
Objects* (that's JMS Queue, Topic and ConnectionFactory instances) from JNDI,
in some cases you just think "Why do I need JNDI? Why can't I just instantiate
these objects directly?"
With Apache ActiveMQ Artemis you can do exactly that. Apache ActiveMQ Artemis supports the direct
instantiation of JMS Queue, Topic and ConnectionFactory instances, so
you don't have to use JNDI at all.
With Apache ActiveMQ Artemis you can do exactly that. Apache ActiveMQ Artemis
supports the direct instantiation of JMS Queue, Topic and ConnectionFactory
instances, so you don't have to use JNDI at all.
>For a full working example of direct instantiation please look at the
>"Instantiate JMS Objects Directly" example under the JMS section of the
>examples. See the [Examples](examples.md) section for more info.
> For a full working example of direct instantiation please look at the
> [Instantiate JMS Objects
> Directly](examples.md#instantiate-jms-objects-directly) example under the JMS
> section of the examples.
Here's our simple example, rewritten to not use JNDI at all:
We create the JMS ConnectionFactory object via the ActiveMQJMSClient
Utility class, note we need to provide connection parameters and specify
which transport we are using, for more information on connectors please
see [Configuring the Transport](configuring-transports.md).
We create the JMS ConnectionFactory object via the ActiveMQJMSClient Utility
class, note we need to provide connection parameters and specify which
transport we are using, for more information on connectors please see
[Configuring the Transport](configuring-transports.md).
```java
TransportConfiguration transportConfiguration = new TransportConfiguration(NettyConnectorFactory.class.getName());
@ -355,35 +367,33 @@ System.out.println("Got order: " + receivedMessage.getText());
## Setting The Client ID
This represents the client id for a JMS client and is needed for
creating durable subscriptions. It is possible to configure this on the
connection factory and can be set via the `clientId` element. Any
connection created by this connection factory will have this set as its
client id.
This represents the client id for a JMS client and is needed for creating
durable subscriptions. It is possible to configure this on the connection
factory and can be set via the `clientId` element. Any connection created by
this connection factory will have this set as its client id.
## Setting The Batch Size for DUPS_OK
When the JMS acknowledge mode is set to `DUPS_OK` it is possible to
configure the consumer so that it sends acknowledgements in batches
rather than one at a time, saving valuable bandwidth. This can be
configured via the connection factory via the `dupsOkBatchSize`
element and is set in bytes. The default is 1024 \* 1024 bytes = 1 MiB.
When the JMS acknowledge mode is set to `DUPS_OK` it is possible to configure
the consumer so that it sends acknowledgements in batches rather than one at a
time, saving valuable bandwidth. This can be configured via the connection
factory via the `dupsOkBatchSize` element and is set in bytes. The default is
1024 \* 1024 bytes = 1 MiB.
## Setting The Transaction Batch Size
When receiving messages in a transaction it is possible to configure the
consumer to send acknowledgements in batches rather than individually
saving valuable bandwidth. This can be configured on the connection
factory via the `transactionBatchSize` element and is set in bytes.
The default is 1024 \* 1024.
consumer to send acknowledgements in batches rather than individually, saving
valuable bandwidth. This can be configured on the connection factory via the
`transactionBatchSize` element and is set in bytes. The default is 1024 \*
1024.
## Setting The Destination Cache
Many frameworks such as Spring resolve the destination by name on every operation,
this can cause a performance issue and extra calls to the broker,
in a scenario where destinations (addresses) are permanent broker side,
such as they are managed by a platform or operations team.
using `destinationCache` element, you can toggle on the destination cache
to improve the performance and reduce the calls to the broker.
This should not be used if destinations (addresses) are not permanent broker side,
as in dynamic creation/deletion.
Many frameworks such as Spring resolve the destination by name on every
operation, this can cause a performance issue and extra calls to the broker, in
a scenario where destinations (addresses) are permanent broker side, such as
they are managed by a platform or operations team. Using the `destinationCache`
element, you can toggle on the destination cache to improve the performance and
reduce the calls to the broker. This should not be used if destinations
(addresses) are not permanent broker side, as in dynamic creation/deletion.

View File

@ -1,20 +1,22 @@
# Using the Server
This chapter will familiarise you with how to use the Apache ActiveMQ Artemis server.
This chapter will familiarise you with how to use the Apache ActiveMQ Artemis
server.
We'll show where it is, how to start and stop it, and we'll describe the
directory layout and what all the files are and what they do.
For the remainder of this chapter when we talk about the Apache ActiveMQ Artemis server
we mean the Apache ActiveMQ Artemis standalone server, in its default configuration
with a JMS Service enabled.
For the remainder of this chapter when we talk about the Apache ActiveMQ
Artemis server we mean the Apache ActiveMQ Artemis standalone server, in its
default configuration with a JMS Service enabled.
This document will refer to the full path of the directory where the ActiveMQ
distribution has been extracted to as `${ARTEMIS_HOME}` directory.
## Installation
After downloading the distribution, the following highlights some important folders on the distribution:
After downloading the distribution, the following highlights some important
folders on the distribution:
|___ bin
|
@ -36,48 +38,52 @@ After downloading the distribution, the following highlights some important fold
|___ user-manual
- `bin` - binaries and scripts needed to run ActiveMQ Artemis.
- `bin` - binaries and scripts needed to run ActiveMQ Artemis.
- `examples` - All manner of examples. Please refer to the [examples](examples.md)
chapter for details on how to run them.
- `examples` - All manner of examples. Please refer to the [examples](examples.md)
chapter for details on how to run them.
- `lib` - jars and libraries needed to run ActiveMQ Artemis
- `lib` - jars and libraries needed to run ActiveMQ Artemis
- `schema` - XML Schemas used to validate ActiveMQ Artemis configuration files
- `schema` - XML Schemas used to validate ActiveMQ Artemis configuration files
- `web` - The folder where the web context is loaded when the broker runs.
- `web` - The folder where the web context is loaded when the broker runs.
- `api` - The api documentation is placed under the web folder.
- `api` - The api documentation is placed under the web folder.
- `user-manual` - The user manual is placed under the web folder.
- `user-manual` - The user manual is placed under the web folder.
## Creating a Broker Instance
A broker instance is the directory containing all the configuration and runtime
data, such as logs and data files, associated with a broker process. It is recommended that
you do *not* create the instance directory under `${ARTEMIS_HOME}`. This separation is
encouraged so that you can more easily upgrade when the next version of ActiveMQ Artemis is released.
data, such as logs and data files, associated with a broker process. It is
recommended that you do *not* create the instance directory under
`${ARTEMIS_HOME}`. This separation is encouraged so that you can more easily
upgrade when the next version of ActiveMQ Artemis is released.
On Unix systems, it is a common convention to store this kind of runtime data under
the `/var/lib` directory. For example, to create an instance at '/var/lib/mybroker', run
the following commands in your command line shell:
On Unix systems, it is a common convention to store this kind of runtime data
under the `/var/lib` directory. For example, to create an instance at
'/var/lib/mybroker', run the following commands in your command line shell:
cd /var/lib
${ARTEMIS_HOME}/bin/artemis create mybroker
```sh
cd /var/lib
${ARTEMIS_HOME}/bin/artemis create mybroker
```
A broker instance directory will contain the following sub directories:
* `bin`: holds execution scripts associated with this instance.
* `etc`: holds the instance configuration files
* `data`: holds the data files used for storing persistent messages
* `log`: holds rotating log files
* `tmp`: holds temporary files that are safe to delete between broker runs
- `bin`: holds execution scripts associated with this instance.
- `etc`: holds the instance configuration files
- `data`: holds the data files used for storing persistent messages
- `log`: holds rotating log files
- `tmp`: holds temporary files that are safe to delete between broker runs
At this point you may want to adjust the default configuration located in
the `etc` directory.
At this point you may want to adjust the default configuration located in the
`etc` directory.
### Options
There are several options you can use when creating an instance.
For a full list of updated properties always use:
@ -288,35 +294,35 @@ For a full list of updated properties always use:
Path must be writable.
```
Some of these properties may be mandatory in certain configurations and the system may ask you for additional input.
Some of these properties may be mandatory in certain configurations and the
system may ask you for additional input.
```
./artemis create /usr/server
Creating ActiveMQ Artemis instance at: /user/server
./artemis create /usr/server
Creating ActiveMQ Artemis instance at: /user/server
--user: is a mandatory property!
Please provide the default username:
admin
--user: is a mandatory property!
Please provide the default username:
admin
--password: is mandatory with this configuration:
Please provide the default password:
--password: is mandatory with this configuration:
Please provide the default password:
--allow-anonymous | --require-login: is a mandatory property!
Allow anonymous access?, valid values are Y,N,True,False
y
--allow-anonymous | --require-login: is a mandatory property!
Allow anonymous access?, valid values are Y,N,True,False
y
Auto tuning journal ...
done! Your system can make 0.34 writes per millisecond, your journal-buffer-timeout will be 2956000
Auto tuning journal ...
done! Your system can make 0.34 writes per millisecond, your journal-buffer-timeout will be 2956000
You can now start the broker by executing:
You can now start the broker by executing:
"/user/server/bin/artemis" run
"/user/server/bin/artemis" run
Or you can run the broker in the background using:
Or you can run the broker in the background using:
"/user/server/bin/artemis-service" start
"/user/server/bin/artemis-service" start
```
@ -325,22 +331,26 @@ Some of these properties may be mandatory in certain configurations and the syst
Assuming you created the broker instance under `/var/lib/mybroker` all you need
to do start running the broker instance is execute:
/var/lib/mybroker/bin/artemis run
```sh
/var/lib/mybroker/bin/artemis run
```
Now that the broker is running, you can optionally run some of the included
examples to verify that the broker is running properly.
To stop the Apache ActiveMQ Artemis instance you will use the same `artemis` script, but with
the `stop argument`. Example:
To stop the Apache ActiveMQ Artemis instance you will use the same `artemis`
script, but with the `stop` argument. Example:
/var/lib/mybroker/bin/artemis stop
```sh
/var/lib/mybroker/bin/artemis stop
```
Please note that Apache ActiveMQ Artemis requires a Java 7 or later runtime to run.
Please note that Apache ActiveMQ Artemis requires a Java 7 or later runtime to
run.
By default the `etc/bootstrap.xml` configuration is
used. The configuration can be changed e.g. by running
`./artemis run -- xml:path/to/bootstrap.xml` or another
config of your choosing.
By default the `etc/bootstrap.xml` configuration is used. The configuration can
be changed e.g. by running `./artemis run -- xml:path/to/bootstrap.xml` or
another config of your choosing.
Environment variables are used to provide ease of changing ports, hosts and
data directories used and can be found in `etc/artemis.profile` on linux and
@ -348,65 +358,64 @@ data directories used and can be found in `etc/artemis.profile` on linux and
## Server JVM settings
The run scripts set some JVM settings for tuning the garbage collection
policy and heap size. We recommend using a parallel garbage collection
algorithm to smooth out latency and minimise large GC pauses.
The run scripts set some JVM settings for tuning the garbage collection policy
and heap size. We recommend using a parallel garbage collection algorithm to
smooth out latency and minimise large GC pauses.
By default Apache ActiveMQ Artemis runs in a maximum of 1GiB of RAM. To increase the
memory settings change the `-Xms` and `-Xmx` memory settings as you
would for any Java program.
By default Apache ActiveMQ Artemis runs in a maximum of 1GiB of RAM. To
increase the memory settings change the `-Xms` and `-Xmx` memory settings as
you would for any Java program.
If you wish to add any more JVM arguments or tune the existing ones, the
run scripts are the place to do it.
If you wish to add any more JVM arguments or tune the existing ones, the run
scripts are the place to do it.
## Library Path
If you're using the [Asynchronous IO Journal](libaio.md) on Linux,
you need to specify `java.library.path` as a property on your Java
options. This is done automatically in the scripts.
If you're using the [Asynchronous IO Journal](libaio.md) on Linux, you need to
specify `java.library.path` as a property on your Java options. This is done
automatically in the scripts.
If you don't specify `java.library.path` at your Java options then the
JVM will use the environment variable `LD_LIBRARY_PATH`.
If you don't specify `java.library.path` at your Java options then the JVM will
use the environment variable `LD_LIBRARY_PATH`.
You will need to make sure libaio is installed on Linux. For more information refer to the libaio chapter at
[Runtime Dependencies](libaio.html#runtime-dependencies)
You will need to make sure libaio is installed on Linux. For more information
refer to the [libaio chapter](libaio.html#runtime-dependencies).
## System properties
Apache ActiveMQ Artemis can take a system property on the command line for configuring
logging.
Apache ActiveMQ Artemis can take a system property on the command line for
configuring logging.
For more information on configuring logging, please see the section on
[Logging](logging.md).
## Configuration files
The configuration file used to bootstrap the server (e.g.
`bootstrap.xml` by default) references the specific broker configuration
files.
The configuration file used to bootstrap the server (e.g. `bootstrap.xml` by
default) references the specific broker configuration files.
- `broker.xml`. This is the main ActiveMQ
configuration file. All the parameters in this file are
described [here](configuration-index.md)
- `broker.xml`. This is the main ActiveMQ configuration file. All the
parameters in this file are described [here](configuration-index.md)
It is also possible to use system property substitution in all the
configuration files by replacing a value with the name of a system
property. Here is an example of this with a connector configuration:
configuration files by replacing a value with the name of a system property.
Here is an example of this with a connector configuration:
<connector name="netty">tcp://${activemq.remoting.netty.host:localhost}:${activemq.remoting.netty.port:61616}</connector>
```xml
<connector name="netty">tcp://${activemq.remoting.netty.host:localhost}:${activemq.remoting.netty.port:61616}</connector>
```
Here you can see we have replaced 2 values with system properties
`activemq.remoting.netty.host` and `activemq.remoting.netty.port`. These
values will be replaced by the value found in the system property if
there is one, if not they default back to localhost or 61616
respectively. It is also possible to not supply a default. i.e.
`${activemq.remoting.netty.host}`, however the system property *must* be
supplied in that case.
`activemq.remoting.netty.host` and `activemq.remoting.netty.port`. These values
will be replaced by the value found in the system property if there is one, if
not they default back to localhost or 61616 respectively. It is also possible
to not supply a default. i.e. `${activemq.remoting.netty.host}`, however the
system property *must* be supplied in that case.
### Bootstrap configuration file
The stand-alone server is basically a set of POJOs which are
instantiated by Airline commands.
The stand-alone server is basically a set of POJOs which are instantiated by
Airline commands.
The bootstrap file is very simple. Let's take a look at an example:
@ -425,48 +434,47 @@ The bootstrap file is very simple. Let's take a look at an example:
</broker>
```
- `server` - Instantiates a core server using the configuration file from the
`configuration` attribute. This is the main broker POJO necessary to do all
the real messaging work.
- `server` - Instantiates a core server using the configuration file from the
`configuration` attribute. This is the main broker POJO necessary to
do all the real messaging work.
- `jaas-security` - Configures security for the server. The `domain` attribute
refers to the relevant login module entry in `login.config`.
- `jaas-security` - Configures security for the server. The `domain` attribute
refers to the relevant login module entry in `login.config`.
- `web` - Configures an embedded Jetty instance to serve web applications like
the admin console.
- `web` - Configures an embedded Jetty instance to serve web applications like
the admin console.
### Broker configuration file
The configuration for the Apache ActiveMQ Artemis core server is contained in
`broker.xml`. This is what the FileConfiguration bean
uses to configure the messaging server.
`broker.xml`. This is what the FileConfiguration bean uses to configure the
messaging server.
There are many attributes which you can configure Apache ActiveMQ Artemis. In most
cases the defaults will do fine, in fact every attribute can be
defaulted which means a file with a single empty `configuration` element
is a valid configuration file. The different configuration will be
explained throughout the manual or you can refer to the configuration
reference [here](configuration-index.md).
There are many attributes with which you can configure Apache ActiveMQ Artemis. In
most cases the defaults will do fine, in fact every attribute can be defaulted
which means a file with a single empty `configuration` element is a valid
configuration file. The different configuration will be explained throughout
the manual or you can refer to the configuration reference
[here](configuration-index.md).
## Windows Server
On windows you will have the option to run ActiveMQ Artemis as a service.
Just use the following command to install it:
On windows you will have the option to run ActiveMQ Artemis as a service. Just
use the following command to install it:
```
$ ./artemis-service.exe install
```
The create process should give you a hint of the available commands available for the artemis-service.exe
The create process should give you a hint of the commands available
for the artemis-service.exe.
## Adding Runtime Dependencies
Runtime dependencies like diverts, transformers, broker plugins, JDBC drivers,
password decoders, etc. must be accessible by the broker at runtime. Package
password decoders, etc. must be accessible by the broker at runtime. Package
the dependency in a jar, and put it on the broker's classpath. This can be done
by placing the jar file in the `lib` directory of the broker distribution itself
or in the `lib` directory of the broker instance. A broker instance does not have
a `lib` directory by default so it may need to be created. It should be on the
"top" level with the `bin`, `data`, `log`, etc. directories.
by placing the jar file in the `lib` directory of the broker distribution
itself or in the `lib` directory of the broker instance. A broker instance does
not have a `lib` directory by default so it may need to be created. It should
be on the "top" level with the `bin`, `data`, `log`, etc. directories.

View File

@ -4,9 +4,9 @@ This chapter provides the information for each release:
- A link to the full release notes which includes all issues resolved in the release.
- A brief list of "highlights."
- If necessary, specific steps required when upgrading from the previous version.
- _NOTE:_ If the upgrade spans multiple versions then the steps from each version need to be followed in order.
- _NOTE:_ Follow the general upgrade procedure outlined in the [Upgrading the Broker](upgrading.md)
chapter in addition to any version-specific upgrade instructions.
- **Note:** If the upgrade spans multiple versions then the steps from **each** version need to be followed in order.
- **Note:** Follow the general upgrade procedure outlined in the [Upgrading the Broker](upgrading.md)
chapter in addition to any version-specific upgrade instructions outlined here.
## 2.5.0
@ -58,7 +58,7 @@ Highlights:
```xml
<app url="console" war="console.war"/>
```
_NOTE:_ the Jolokia REST interface URL will now be at `http://<host>:<port>/console/jolokia`
**Note:** the Jolokia REST interface URL will now be at `http://<host>:<port>/console/jolokia`
## 2.3.0

View File

@ -8,7 +8,7 @@ receive any messages sent to addresses that match this, for instance
you create a consumer on this queue, this allows a consumer to consume
messages which are sent to a *hierarchy* of addresses.
> **Note**
> **Note:**
>
> In JMS terminology this allows "topic hierarchies" to be created.

View File

@ -26,11 +26,10 @@ under the License.
<acceptor name="in-vm">vm://0</acceptor>
</acceptors>
<!-- Other config -->
<security-settings>
<!--security for example queue-->
<security-setting match="#">
<permission roles="guest" type="createAddress"/>
<permission roles="guest" type="deleteAddress"/>
<permission roles="guest" type="createDurableQueue"/>
<permission roles="guest" type="deleteDurableQueue"/>
<permission roles="guest" type="createNonDurableQueue"/>
@ -39,13 +38,5 @@ under the License.
<permission roles="guest" type="send"/>
</security-setting>
</security-settings>
<addresses>
<address name="exampleQueue">
<anycast>
<queue name="jms.queue.exampleQueue"/>
</anycast>
</address>
</addresses>
</core>
</configuration>

View File

@ -49,16 +49,11 @@ under the License.
<bean id="EmbeddedJms" class="org.apache.activemq.artemis.integration.spring.SpringJmsBootstrap" init-method="start"
destroy-method="close">
<property name="SecurityManager" ref="securityManager"/>
<property name="SecurityManager" ref="securityManager"/>
</bean>
<bean id="connectionFactory" class="org.apache.activemq.artemis.jms.client.ActiveMQJMSConnectionFactory">
<constructor-arg value="false"/>
<constructor-arg>
<bean class="org.apache.activemq.artemis.api.core.TransportConfiguration">
<constructor-arg value="org.apache.activemq.artemis.core.remoting.impl.invm.InVMConnectorFactory"/>
</bean>
</constructor-arg>
<constructor-arg value="vm://0"/>
</bean>
<bean id="exampleQueue" class="org.apache.activemq.artemis.jms.client.ActiveMQQueue">