[AMQ-7502] Remove leveldb

Author: jbonofre
Date: 2021-03-12 08:53:53 +01:00
Parent: fc0999cc87
Commit: 52a2bd446a

152 changed files with 195 additions and 16657 deletions
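With this change, a broker configuration that still selects the LevelDB persistence adapter will no longer start, since the `levelDB` XBean element and the backing `activemq-leveldb-store` module are gone. A minimal migration sketch, assuming a standard `activemq.xml` and an illustrative data directory (note that KahaDB cannot read existing LevelDB data, so messages should be drained before switching):

    <!-- Before this commit (no longer available): -->
    <persistenceAdapter>
      <levelDB directory="${activemq.data}/leveldb"/>
    </persistenceAdapter>

    <!-- After: KahaDB, the default ActiveMQ message store -->
    <persistenceAdapter>
      <kahaDB directory="${activemq.data}/kahadb"/>
    </persistenceAdapter>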

.gitignore (5 changes)
View File

@@ -16,14 +16,9 @@ activemq-unit-tests/KahaDB
 activemq-unit-tests/broker
 activemq-unit-tests/derby.log
 activemq-unit-tests/derbyDb
-activemq-unit-tests/LevelDB
 activemq-unit-tests/networkedBroker
 activemq-unit-tests/shared
 activemq-data
-activemq-leveldb-store/.cache
-activemq-leveldb-store/.cache-main
-activemq-leveldb-store/.cache-tests
-activemq-leveldb-store/.tmpBin
 activemq-runtime-config/src/main/resources/activemq.xsd
 activemq-amqp/amqp-trace.txt
 data/

View File

@@ -59,10 +59,6 @@
 <groupId>${project.groupId}</groupId>
 <artifactId>activemq-jdbc-store</artifactId>
 </dependency>
-<dependency>
-<groupId>${project.groupId}</groupId>
-<artifactId>activemq-leveldb-store</artifactId>
-</dependency>
 <dependency>
 <groupId>org.apache.geronimo.specs</groupId>
 <artifactId>geronimo-annotation_1.0_spec</artifactId>
@@ -109,7 +105,6 @@
 <include>${project.groupId}:activemq-mqtt</include>
 <include>${project.groupId}:activemq-stomp</include>
 <include>${project.groupId}:activemq-kahadb-store</include>
-<include>${project.groupId}:activemq-leveldb-store</include>
 <include>${project.groupId}:activemq-jdbc-store</include>
 <include>org.apache.activemq.protobuf:activemq-protobuf</include>
 <include>org.fusesource.hawtbuf:hawtbuf</include>
@@ -314,13 +309,6 @@
 <classifier>sources</classifier>
 <optional>true</optional>
 </dependency>
-<dependency>
-<groupId>${project.groupId}</groupId>
-<artifactId>activemq-leveldb-store</artifactId>
-<version>${project.version}</version>
-<classifier>sources</classifier>
-<optional>true</optional>
-</dependency>
 <dependency>
 <groupId>org.apache.activemq.protobuf</groupId>
 <artifactId>activemq-protobuf</artifactId>

View File

@@ -114,11 +114,6 @@
 <artifactId>spring-context</artifactId>
 <scope>test</scope>
 </dependency>
-<dependency>
-<groupId>org.apache.activemq</groupId>
-<artifactId>activemq-leveldb-store</artifactId>
-<scope>test</scope>
-</dependency>
 <dependency>
 <groupId>org.apache.activemq.tooling</groupId>
 <artifactId>activemq-junit</artifactId>

View File

@@ -90,6 +90,17 @@
 <artifactId>activemq-jaas</artifactId>
 <scope>test</scope>
 </dependency>
+<dependency>
+<groupId>org.apache.activemq</groupId>
+<artifactId>activemq-kahadb-store</artifactId>
+<scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.commons</groupId>
+<artifactId>commons-lang3</artifactId>
+<version>${commons-lang-version}</version>
+<scope>test</scope>
+</dependency>
 <dependency>
 <groupId>junit</groupId>
 <artifactId>junit</artifactId>

View File

@@ -1,122 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.bugs;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import javax.jms.Connection;
import javax.jms.DeliveryMode;
import javax.jms.JMSException;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.net.ServerSocketFactory;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.TransportConnector;
import org.apache.activemq.leveldb.LevelDBStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
public class AMQ5816Test {
private static BrokerService brokerService;
@Rule public TestName name = new TestName();
private File dataDirFile;
private String connectionURI;
@Before
public void setUp() throws Exception {
dataDirFile = new File("target/" + name.getMethodName());
brokerService = new BrokerService();
brokerService.setBrokerName("LevelDBBroker");
brokerService.setPersistent(true);
brokerService.setUseJmx(false);
brokerService.setAdvisorySupport(false);
brokerService.setDeleteAllMessagesOnStartup(true);
brokerService.setDataDirectoryFile(dataDirFile);
TransportConnector connector = brokerService.addConnector("http://0.0.0.0:" + getFreePort());
LevelDBStore persistenceFactory = new LevelDBStore();
persistenceFactory.setDirectory(dataDirFile);
brokerService.setPersistenceAdapter(persistenceFactory);
brokerService.start();
brokerService.waitUntilStarted();
connectionURI = connector.getPublishableConnectString();
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
brokerService.stop();
brokerService.waitUntilStopped();
}
@Test
public void testSendPersistentMessage() throws JMSException {
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(connectionURI);
Connection connection = factory.createConnection();
connection.start();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
Queue queue = session.createQueue(name.getMethodName());
MessageProducer producer = session.createProducer(queue);
MessageConsumer consumer = session.createConsumer(queue);
producer.setDeliveryMode(DeliveryMode.PERSISTENT);
producer.send(session.createTextMessage());
assertNotNull(consumer.receive(5000));
}
protected int getFreePort() {
int port = 8161;
ServerSocket ss = null;
try {
ss = ServerSocketFactory.getDefault().createServerSocket(0);
port = ss.getLocalPort();
} catch (IOException e) { // ignore
} finally {
try {
if (ss != null) {
ss.close();
}
} catch (IOException e) { // ignore
}
}
return port;
}
}
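
After this removal, a broker that was programmatically configured with a LevelDBStore, as in the deleted test above, would typically be switched to KahaDB. A minimal sketch, assuming the standard activemq-kahadb-store module is on the classpath (the helper class and directory name are illustrative):

    import java.io.File;

    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;

    public class KahaDBBrokerSetup {

        // Builds a persistent broker backed by KahaDB instead of the removed LevelDBStore.
        public static BrokerService createBroker(File dataDirFile) throws Exception {
            BrokerService brokerService = new BrokerService();
            brokerService.setPersistent(true);
            brokerService.setUseJmx(false);
            brokerService.setAdvisorySupport(false);
            brokerService.setDeleteAllMessagesOnStartup(true);
            brokerService.setDataDirectoryFile(dataDirFile);

            // KahaDBPersistenceAdapter takes the place LevelDBStore filled in setUp().
            KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
            adapter.setDirectory(new File(dataDirFile, "kahadb"));
            brokerService.setPersistenceAdapter(adapter);
            return brokerService;
        }
    }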

View File

@@ -1,97 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:amq="http://activemq.apache.org/schema/core"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<broker xmlns="http://activemq.apache.org/schema/core"
brokerName="${broker-name}"
dataDirectory="${data}"
start="false">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" producerFlowControl="true">
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
<policyEntry queue=">" producerFlowControl="true" memoryLimit="1mb">
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<persistenceAdapter>
<levelDB directory="${data}/leveldb"/>
</persistenceAdapter>
<plugins>
<jaasAuthenticationPlugin configuration="karaf" />
<authorizationPlugin>
<map>
<authorizationMap groupClass="org.apache.karaf.jaas.boot.principal.RolePrincipal">
<authorizationEntries>
<authorizationEntry queue=">" read="admin" write="admin" admin="admin"/>
<authorizationEntry topic=">" read="admin" write="admin" admin="admin"/>
<authorizationEntry topic="ActiveMQ.Advisory.>" read="admin" write="admin" admin="admin"/>
</authorizationEntries>
<tempDestinationAuthorizationEntry>
<tempDestinationAuthorizationEntry read="admin" write="admin" admin="admin"/>
</tempDestinationAuthorizationEntry>
</authorizationMap>
</map>
</authorizationPlugin>
</plugins>
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage limit="64 mb"/>
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<transportConnectors>
<transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000"/>
<transportConnector name="http" uri="http://0.0.0.0:61626"/>
<transportConnector name="amqp" uri="amqp://0.0.0.0:61636?transport.transformer=jms"/>
<transportConnector name="ws" uri="ws://0.0.0.0:61646"/>
<transportConnector name="mqtt" uri="ws://0.0.0.0:61656"/>
</transportConnectors>
</broker>
</beans>

View File

@@ -71,6 +71,5 @@
 <bundle dependency="true">mvn:com.fasterxml.jackson.core/jackson-core/${jackson-version}</bundle>
 <bundle dependency="true">mvn:com.fasterxml.jackson.core/jackson-databind/${jackson-databind-version}</bundle>
 <bundle dependency="true">mvn:com.fasterxml.jackson.core/jackson-annotations/${jackson-version}</bundle>
-<bundle dependency="true">mvn:org.scala-lang/scala-library/${scala-version}</bundle>
 </feature>
 </features>

Binary file not shown (deleted image, 11 KiB).

View File

@@ -1,639 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-parent</artifactId>
<version>5.17.0-SNAPSHOT</version>
</parent>
<artifactId>activemq-leveldb-store</artifactId>
<packaging>jar</packaging>
<name>ActiveMQ :: LevelDB Store</name>
<description>ActiveMQ LevelDB Store Implementation</description>
<dependencies>
<!-- for scala support -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala-version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-broker</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.fusesource.hawtbuf</groupId>
<artifactId>hawtbuf-proto</artifactId>
<version>${hawtbuf-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.hawtdispatch</groupId>
<artifactId>hawtdispatch-scala-2.11</artifactId>
<version>${hawtdispatch-version}</version>
</dependency>
<dependency>
<groupId>org.iq80.leveldb</groupId>
<artifactId>leveldb-api</artifactId>
<version>${leveldb-version}</version>
</dependency>
<dependency>
<groupId>org.iq80.leveldb</groupId>
<artifactId>leveldb</artifactId>
<version>${leveldb-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava-version}</version>
</dependency>
<!-- Lets not include the JNI libs for now so that we can harden the pure java driver more -->
<!--
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-osx</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-linux32</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-linux64</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-win32</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-win64</artifactId>
<version>${leveldbjni-version}</version>
</dependency>
-->
<!-- For Replication -->
<dependency>
<groupId>org.fusesource.hawtdispatch</groupId>
<artifactId>hawtdispatch-transport</artifactId>
<version>${hawtdispatch-version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.linkedin</groupId>
<artifactId>org.linkedin.zookeeper-impl</artifactId>
<version>${linkedin-zookeeper-version}</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.linkedin</groupId>
<artifactId>org.linkedin.util-core</artifactId>
<version>${linkedin-zookeeper-version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper-version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
<artifactId>osgi.core</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.osgi</groupId>
<artifactId>osgi.cmpn</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<scope>test</scope>
</dependency>
<!-- For Optional Snappy Compression -->
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<version>${snappy-version}</version>
</dependency>
<dependency>
<groupId>org.iq80.snappy</groupId>
<artifactId>snappy</artifactId>
<version>0.2</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<version>${hadoop-version}</version>
<scope>test</scope>
<exclusions>
<!-- hadoop's transitive dependencies are such a pig -->
<exclusion>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils-core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math</artifactId>
</exclusion>
<exclusion>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Testing Dependencies -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-broker</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-kahadb-store</artifactId>
<scope>test</scope>
</dependency>
<!-- Hadoop Testing Deps -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<version>${hadoop-version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>${commons-lang-version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math</artifactId>
<version>2.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_2.11</artifactId>
<version>${scalatest-version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>${scala-plugin-version}</version>
<executions>
<execution>
<id>compile</id>
<goals>
<goal>compile</goal>
</goals>
<phase>compile</phase>
</execution>
<execution>
<id>test-compile</id>
<goals>
<goal>testCompile</goal>
</goals>
<phase>test-compile</phase>
</execution>
<execution>
<phase>process-resources</phase>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
<configuration>
<jvmArgs>
<jvmArg>-Xmx1024m</jvmArg>
<jvmArg>-Xss8m</jvmArg>
</jvmArgs>
<scalaVersion>${scala-version}</scalaVersion>
<args>
<arg>-deprecation</arg>
</args>
<compilerPlugins>
<!-- <compilerPlugin>
<groupId>org.fusesource.jvmassert</groupId>
<artifactId>jvmassert</artifactId>
<version>1.4</version>
</compilerPlugin> -->
</compilerPlugins>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<!-- we must turn off the use of system class loader so our tests can find stuff - otherwise ScalaSupport compiler can't find stuff -->
<useSystemClassLoader>false</useSystemClassLoader>
<childDelegation>false</childDelegation>
<useFile>true</useFile>
<failIfNoTests>false</failIfNoTests>
<excludes>
<exclude>**/EnqueueRateScenariosTest.*</exclude>
<exclude>**/DFSLevelDB*.*</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>org.fusesource.hawtbuf</groupId>
<artifactId>hawtbuf-protoc</artifactId>
<version>${hawtbuf-version}</version>
<configuration>
<type>alt</type>
</configuration>
<executions>
<execution>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-source</id>
<phase>generate-sources</phase>
<goals>
<goal>add-source</goal>
</goals>
<configuration>
<sources>
<source>${basedir}/src/main/scala</source>
<source>${basedir}/target/generated-sources/proto</source>
</sources>
</configuration>
</execution>
<execution>
<id>add-test-source</id>
<phase>generate-test-sources</phase>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>${basedir}/src/test/scala</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings only.
It has no influence on the Maven build itself.-->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<version>1.0.0</version>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
<pluginExecution>
<pluginExecutionFilter>
<groupId>org.fusesource.hawtbuf</groupId>
<artifactId>hawtbuf-protoc</artifactId>
<versionRange>[${hawtbuf-version},)</versionRange>
<goals>
<goal>compile</goal>
</goals>
</pluginExecutionFilter>
<action>
<execute><runOnIncremental>true</runOnIncremental></execute>
<!--<ignore />-->
</action>
</pluginExecution>
<pluginExecution>
<pluginExecutionFilter>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<versionRange>[0.0.0,)</versionRange>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore />
</action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-eclipse-plugin</artifactId>
<configuration>
<buildcommands>
<java.lang.String>org.scala-ide.sdt.core.scalabuilder</java.lang.String>
</buildcommands>
<projectnatures>
<nature>org.scala-ide.sdt.core.scalanature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</projectnatures>
<sourceIncludes>
<sourceInclude>**/*.scala</sourceInclude>
</sourceIncludes>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<profiles>
<profile>
<id>activemq.tests-sanity</id>
<activation>
<property>
<name>activemq.tests</name>
<value>smoke</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<includes>
<include>**/LevelDBStoreTest.*</include>
</includes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests-autoTransport</id>
<activation>
<property>
<name>activemq.tests</name>
<value>autoTransport</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes>
<exclude>**</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.windows.excludes</id>
<activation>
<os>
<family>Windows</family>
</os>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/*.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.solaris.excludes</id>
<activation>
<property>
<name>os.name</name>
<value>SunOS</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/*.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.aix.excludes</id>
<activation>
<property>
<name>os.name</name>
<value>AIX</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/*.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.hpux.excludes</id>
<activation>
<os>
<family>HP-UX</family>
</os>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/*.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -1,94 +0,0 @@
# The LevelDB Store
## Overview
The LevelDB Store is a message store implementation that can be used in ActiveMQ messaging servers.
## LevelDB vs KahaDB
How is the LevelDB Store better than the default KahaDB store?
* It maintains fewer index entries per message than KahaDB, which means it has a higher persistent throughput.
* Faster recovery when a broker restarts.
* Since the broker tends to write and read queue entries sequentially, the LevelDB-based index provides much better performance than the B-Tree-based indexes of KahaDB, which increases throughput.
* Unlike the KahaDB indexes, the LevelDB indexes support concurrent read access, which further improves read throughput.
* Pauseless data log file garbage collection cycles.
* It uses fewer read IO operations to load stored messages.
* If a message is copied to multiple queues (typically happens if you're using virtual topics with multiple consumers), then LevelDB will only journal the payload of the message once; KahaDB will journal it multiple times.
* It exposes its status via JMX for monitoring.
* Supports replication for High Availability.
See the following chart to get an idea of how much better you can expect the LevelDB store to perform vs the KahaDB store:
![kahadb-vs-leveldb.png](https://raw.github.com/fusesource/fuse-extra/master/fusemq-leveldb/kahadb-vs-leveldb.png)
## How to Use with ActiveMQ
Update the broker configuration file and change the `persistenceAdapter` element
so that it uses the LevelDB store, as in the following Spring XML
configuration example:

    <persistenceAdapter>
      <levelDB directory="${activemq.base}/data/leveldb" logSize="107374182"/>
    </persistenceAdapter>
### Configuration / Property Reference
*TODO*
### JMX Attribute and Operation Reference
*TODO*
## Known Limitations
* The store does not do any dup detection of messages.
## Built-in High Availability Support
You can also use a High Availability (HA) version of the LevelDB store which
works with Hadoop-based file systems to achieve HA of your stored messages.
**Q:** What are the requirements?
**A:** An existing Hadoop 1.0.0 cluster.
**Q:** How does it work during the normal operating cycle?
**A:** It uses HDFS to store a highly available copy of the local leveldb storage files. As local log files are being written to, it also maintains a mirror copy on HDFS. If you have sync enabled on the store, an HDFS file sync is performed instead of a local disk sync. When the index is checkpointed, we upload any not-yet-uploaded leveldb .sst files to HDFS.
**Q:** What happens when a broker fails and we start up a new slave to take over?
**A:** The slave will download from HDFS the log files and the .sst files associated with the latest uploaded index. Then normal leveldb store recovery kicks in, which updates the index using the log files.
**Q:** How do I use the HA version of the LevelDB store?
**A:** Update your activemq.xml to use a `persistenceAdapter` setting similar to the following:

    <persistenceAdapter>
      <bean xmlns="http://www.springframework.org/schema/beans"
        class="org.apache.activemq.leveldb.HALevelDBStore">
        <!-- File system URL to replicate to -->
        <property name="dfsUrl" value="hdfs://hadoop-name-node"/>
        <!-- Directory in the file system to store the data in -->
        <property name="dfsDirectory" value="activemq"/>
        <property name="directory" value="${activemq.base}/data/leveldb"/>
        <property name="logSize" value="107374182"/>
        <!-- <property name="sync" value="false"/> -->
      </bean>
    </persistenceAdapter>

Notice that the implementation class name changes to 'HALevelDBStore'.
Instead of using a 'dfsUrl' property, you can also just load an existing Hadoop configuration file if one is available on your system, for example:

    <property name="dfsConfig" value="/opt/hadoop-1.0.0/conf/core-site.xml"/>

**Q:** Who handles starting up the slave?
**A:** You do. :) This implementation assumes master startup/elections are performed externally and that 2 brokers are never running against the same HDFS file path. In practice this means you need something like ZooKeeper to control starting new brokers to take over failed masters.
**Q:** Can this run against something other than HDFS?
**A:** It should be able to run with any Hadoop-supported file system like CloudStore, S3, MapR, NFS, etc. (well, at least in theory; I've only tested against HDFS).
**Q:** Can 'X' performance be optimized?
**A:** There are a bunch of ways to improve the performance of many of the things the current version of the store is doing: for example, aggregating the .sst files into an archive to make more efficient use of HDFS, concurrent downloading to improve recovery performance, lazy downloading of the oldest log files to make recovery faster, async HDFS writes to avoid blocking local updates, and running brokers in a warm 'standby' mode which keeps downloading new log updates and applying index updates from the master as they get uploaded to HDFS, for faster failovers.
**Q:** Does the broker fail if HDFS fails?
**A:** Currently, yes. But it should be possible to make the master resilient to HDFS failures.

View File

@@ -1,208 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.PersistenceAdapterFactory;
import java.io.File;
import java.io.IOException;
/**
* A factory which can create configured LevelDBStore objects.
*/
public class LevelDBStoreFactory implements PersistenceAdapterFactory {
private int asyncBufferSize = 1024*1024*4;
private File directory = new File("LevelDB");
private int flushDelay = 1000*5;
private int indexBlockRestartInterval = 16;
private int indexBlockSize = 4 * 1024;
private long indexCacheSize = 1024 * 1024 * 256L;
private String indexCompression = "snappy";
private String indexFactory = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory";
private int indexMaxOpenFiles = 1000;
private int indexWriteBufferSize = 1024*1024*6;
private String logCompression = "none";
private File logDirectory;
private long logSize = 1024 * 1024 * 100;
private boolean monitorStats;
private boolean paranoidChecks;
private boolean sync = true;
private boolean verifyChecksums;
@Override
public PersistenceAdapter createPersistenceAdapter() throws IOException {
LevelDBStore store = new LevelDBStore();
store.setVerifyChecksums(verifyChecksums);
store.setAsyncBufferSize(asyncBufferSize);
store.setDirectory(directory);
store.setFlushDelay(flushDelay);
store.setIndexBlockRestartInterval(indexBlockRestartInterval);
store.setIndexBlockSize(indexBlockSize);
store.setIndexCacheSize(indexCacheSize);
store.setIndexCompression(indexCompression);
store.setIndexFactory(indexFactory);
store.setIndexMaxOpenFiles(indexMaxOpenFiles);
store.setIndexWriteBufferSize(indexWriteBufferSize);
store.setLogCompression(logCompression);
store.setLogDirectory(logDirectory);
store.setLogSize(logSize);
store.setMonitorStats(monitorStats);
store.setParanoidChecks(paranoidChecks);
store.setSync(sync);
return store;
}
public int getAsyncBufferSize() {
return asyncBufferSize;
}
public void setAsyncBufferSize(int asyncBufferSize) {
this.asyncBufferSize = asyncBufferSize;
}
public File getDirectory() {
return directory;
}
public void setDirectory(File directory) {
this.directory = directory;
}
public int getFlushDelay() {
return flushDelay;
}
public void setFlushDelay(int flushDelay) {
this.flushDelay = flushDelay;
}
public int getIndexBlockRestartInterval() {
return indexBlockRestartInterval;
}
public void setIndexBlockRestartInterval(int indexBlockRestartInterval) {
this.indexBlockRestartInterval = indexBlockRestartInterval;
}
public int getIndexBlockSize() {
return indexBlockSize;
}
public void setIndexBlockSize(int indexBlockSize) {
this.indexBlockSize = indexBlockSize;
}
public long getIndexCacheSize() {
return indexCacheSize;
}
public void setIndexCacheSize(long indexCacheSize) {
this.indexCacheSize = indexCacheSize;
}
public String getIndexCompression() {
return indexCompression;
}
public void setIndexCompression(String indexCompression) {
this.indexCompression = indexCompression;
}
public String getIndexFactory() {
return indexFactory;
}
public void setIndexFactory(String indexFactory) {
this.indexFactory = indexFactory;
}
public int getIndexMaxOpenFiles() {
return indexMaxOpenFiles;
}
public void setIndexMaxOpenFiles(int indexMaxOpenFiles) {
this.indexMaxOpenFiles = indexMaxOpenFiles;
}
public int getIndexWriteBufferSize() {
return indexWriteBufferSize;
}
public void setIndexWriteBufferSize(int indexWriteBufferSize) {
this.indexWriteBufferSize = indexWriteBufferSize;
}
public String getLogCompression() {
return logCompression;
}
public void setLogCompression(String logCompression) {
this.logCompression = logCompression;
}
public File getLogDirectory() {
return logDirectory;
}
public void setLogDirectory(File logDirectory) {
this.logDirectory = logDirectory;
}
public long getLogSize() {
return logSize;
}
public void setLogSize(long logSize) {
this.logSize = logSize;
}
public boolean isMonitorStats() {
return monitorStats;
}
public void setMonitorStats(boolean monitorStats) {
this.monitorStats = monitorStats;
}
public boolean isParanoidChecks() {
return paranoidChecks;
}
public void setParanoidChecks(boolean paranoidChecks) {
this.paranoidChecks = paranoidChecks;
}
public boolean isSync() {
return sync;
}
public void setSync(boolean sync) {
this.sync = sync;
}
public boolean isVerifyChecksums() {
return verifyChecksums;
}
public void setVerifyChecksums(boolean verifyChecksums) {
this.verifyChecksums = verifyChecksums;
}
}

View File

@@ -1,56 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb;
import org.apache.activemq.broker.jmx.MBeanInfo;
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
public interface LevelDBStoreTestMBean {
@MBeanInfo("Used to set if the log force calls should be suspended")
void setSuspendForce(boolean value);
@MBeanInfo("Gets if the log force calls should be suspended")
boolean getSuspendForce();
@MBeanInfo("Gets the number of threads waiting to do a log force call.")
long getForceCalls();
@MBeanInfo("Used to set if the log write calls should be suspended")
void setSuspendWrite(boolean value);
@MBeanInfo("Gets if the log write calls should be suspended")
boolean getSuspendWrite();
@MBeanInfo("Gets the number of threads waiting to do a log write call.")
long getWriteCalls();
@MBeanInfo("Used to set if the log delete calls should be suspended")
void setSuspendDelete(boolean value);
@MBeanInfo("Gets if the log delete calls should be suspended")
boolean getSuspendDelete();
@MBeanInfo("Gets the number of threads waiting to do a log delete call.")
long getDeleteCalls();
}

View File

@@ -1,111 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb;
import org.apache.activemq.broker.jmx.MBeanInfo;
import java.io.File;
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
public interface LevelDBStoreViewMBean {
@MBeanInfo("The directory holding the store index data.")
String getIndexDirectory();
@MBeanInfo("The directory holding the store log data.")
String getLogDirectory();
@MBeanInfo("The size the log files are allowed to grow to.")
long getLogSize();
@MBeanInfo("The implementation of the LevelDB index being used.")
String getIndexFactory();
@MBeanInfo("Are writes synced to disk.")
boolean getSync();
@MBeanInfo("Is data verified against checksums as it's loaded back from disk.")
boolean getVerifyChecksums();
@MBeanInfo("The maximum number of open files the index will open at one time.")
int getIndexMaxOpenFiles();
@MBeanInfo("Number of keys between restart points for delta encoding of keys in the index")
int getIndexBlockRestartInterval();
@MBeanInfo("Do aggressive checking of store data")
boolean getParanoidChecks();
@MBeanInfo("Amount of data to build up in memory for the index before converting to a sorted on-disk file.")
int getIndexWriteBufferSize();
@MBeanInfo("Approximate size of user data packed per block for the index")
int getIndexBlockSize();
@MBeanInfo("The type of compression to use for the index")
String getIndexCompression();
@MBeanInfo("The size of the cache index")
long getIndexCacheSize();
@MBeanInfo("The maximum amount of async writes to buffer up")
int getAsyncBufferSize();
@MBeanInfo("The number of units of work which have been closed.")
long getUowClosedCounter();
@MBeanInfo("The number of units of work which have been canceled.")
long getUowCanceledCounter();
@MBeanInfo("The number of units of work which started getting stored.")
long getUowStoringCounter();
@MBeanInfo("The number of units of work which completed getting stored")
long getUowStoredCounter();
@MBeanInfo("Gets and resets the maximum time (in ms) a unit of work took to complete.")
double resetUowMaxCompleteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) an index write batch took to execute.")
double resetMaxIndexWriteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log write took to execute (includes the index write latency).")
double resetMaxLogWriteLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log flush took to execute.")
double resetMaxLogFlushLatency();
@MBeanInfo("Gets and resets the maximum time (in ms) a log rotation took to perform.")
double resetMaxLogRotateLatency();
@MBeanInfo("Gets the maximum time (in ms) a unit of work took to complete.")
double getUowMaxCompleteLatency();
@MBeanInfo("Gets the maximum time (in ms) an index write batch took to execute.")
double getMaxIndexWriteLatency();
@MBeanInfo("Gets the maximum time (in ms) a log write took to execute (includes the index write latency).")
double getMaxLogWriteLatency();
@MBeanInfo("Gets the maximum time (in ms) a log flush took to execute.")
double getMaxLogFlushLatency();
@MBeanInfo("Gets the maximum time (in ms) a log rotation took to perform.")
double getMaxLogRotateLatency();
@MBeanInfo("Gets the index statistics.")
String getIndexStats();
@MBeanInfo("Compacts disk usage")
void compact();
}

View File

@@ -1,66 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated;
import org.apache.activemq.broker.jmx.MBeanInfo;
import javax.management.openmbean.CompositeData;
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
public interface ReplicatedLevelDBStoreViewMBean {
@MBeanInfo("The address of the ZooKeeper server.")
String getZkAddress();
@MBeanInfo("The path in ZooKeeper to hold master elections.")
String getZkPath();
@MBeanInfo("The ZooKeeper session timeout.")
String getZkSessionTimeout();
@MBeanInfo("The address and port the master will bind for the replication protocol.")
String getBind();
@MBeanInfo("The number of replication nodes that will be part of the replication cluster.")
int getReplicas();
@MBeanInfo("The role of this node in the replication cluster.")
String getNodeRole();
@MBeanInfo("The replication status.")
String getStatus();
@MBeanInfo("The status of the connected slaves.")
CompositeData[] getSlaves();
@MBeanInfo("The current position of the replication log.")
Long getPosition();
@MBeanInfo("When the last entry was added to the replication log.")
Long getPositionDate();
@MBeanInfo("The directory holding the data.")
String getDirectory();
@MBeanInfo("The sync strategy to use.")
String getSync();
@MBeanInfo("The node id of this replication node.")
String getNodeId();
}

View File

@@ -1,42 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.HashSet;
import java.util.Set;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="file_info")
@XmlAccessorType(XmlAccessType.FIELD)
public class FileInfo {
@XmlAttribute(name = "file")
public String file;
@XmlAttribute(name = "length")
public long length;
@XmlAttribute(name = "crc32")
public long crc32;
}

View File

@@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="remove_request")
@XmlAccessorType(XmlAccessType.FIELD)
@JsonIgnoreProperties(ignoreUnknown = true)
public class LogDelete {
@XmlAttribute(name="log")
public long log;
}

View File

@@ -1,48 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="log_write")
@XmlAccessorType(XmlAccessType.FIELD)
@JsonIgnoreProperties(ignoreUnknown = true)
public class LogWrite {
@XmlAttribute(name="file")
public long file;
@XmlAttribute(name="offset")
public long offset;
@XmlAttribute(name="length")
public long length;
@XmlAttribute(name="sync")
public boolean sync=false;
@XmlAttribute(name="date")
public long date;
}

View File

@@ -1,41 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="login")
@XmlAccessorType(XmlAccessType.FIELD)
@JsonIgnoreProperties(ignoreUnknown = true)
public class Login {
@XmlAttribute(name="node_id")
public String node_id;
@XmlAttribute(name="security_token")
public String security_token;
}

View File

@@ -1,48 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.HashSet;
import java.util.Set;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="sync_response")
@XmlAccessorType(XmlAccessType.FIELD)
public class SyncResponse {
@XmlAttribute(name = "snapshot_position")
public long snapshot_position;
@XmlAttribute(name = "wal_append_position")
public long wal_append_position;
@XmlAttribute(name = "index_files")
public Set<FileInfo> index_files = new HashSet<FileInfo>();
@XmlAttribute(name = "log_files")
public Set<FileInfo> log_files = new HashSet<FileInfo>();
@XmlAttribute(name = "append_log")
public String append_log;
}

View File

@@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="transfer_request")
@XmlAccessorType(XmlAccessType.FIELD)
@JsonIgnoreProperties(ignoreUnknown = true)
public class Transfer {
@XmlAttribute(name="file")
public String file;
@XmlAttribute(name="offset")
public long offset;
@XmlAttribute(name="length")
public long length;
}

View File

@@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="transfer_request")
@XmlAccessorType(XmlAccessType.FIELD)
@JsonIgnoreProperties(ignoreUnknown = true)
public class WalAck {
@XmlAttribute(name="position")
public long position;
}

View File

@@ -1,30 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.leveldb;
import org.apache.activemq.leveldb.LevelDBStore;
/**
* An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with
* LevelDB - Embedded Lightweight Non-Relational Database
*
* @org.apache.xbean.XBean element="levelDB"
*
*/
public class LevelDBPersistenceAdapter extends LevelDBStore {
}

View File

@@ -1,31 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.leveldb;
import org.apache.activemq.leveldb.replicated.ElectingLevelDBStore;
import org.apache.activemq.store.PersistenceAdapter;
/**
* An implementation of {@link org.apache.activemq.store.PersistenceAdapter} designed for use with
* LevelDB - Embedded Lightweight Non-Relational Database
*
* @org.apache.xbean.XBean element="replicatedLevelDB"
*
*/
public class ReplicatedLevelDBPersistenceAdapter extends ElectingLevelDBStore implements PersistenceAdapter {
}

View File

@ -1,57 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.apache.activemq.leveldb.record;
option java_multiple_files = true;
//
// We create a collection record for each
// transaction, queue, topic.
//
message CollectionKey {
required int64 key = 1;
}
message CollectionRecord {
optional int64 key = 1;
optional int32 type = 2;
optional bytes meta = 3 [java_override_type = "Buffer"];
}
//
// We create an entry record for each message, subscription,
// and subscription position.
//
message EntryKey {
required int64 collection_key = 1;
required bytes entry_key = 2 [java_override_type = "Buffer"];
}
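//
// An entry record either carries its payload inline in the `value` field,
// or points at a region of the log via `value_location` and `value_length`.
//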
message EntryRecord {
optional int64 collection_key = 1;
optional bytes entry_key = 2 [java_override_type = "Buffer"];
optional int64 value_location = 3;
optional int32 value_length = 4;
optional bytes value = 5 [java_override_type = "Buffer"];
optional bytes meta = 6 [java_override_type = "Buffer"];
}
message SubscriptionRecord {
optional int64 topic_key = 1;
optional string client_id = 2;
optional string subscription_name = 3;
optional string selector = 4;
optional string destination_name = 5;
optional string subscribed_destination_name = 6;
}

View File

@ -1,139 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq
import java.nio.ByteBuffer
import org.fusesource.hawtbuf.Buffer
import org.xerial.snappy.{Snappy => Xerial}
import org.iq80.snappy.{Snappy => Iq80}
/**
* <p>
* A Snappy abstraction which attempts to use the iq80 implementation and falls back
* to the xerial Snappy implementation if it cannot be loaded. You can change the
* load order by setting the 'leveldb.snappy' system property. Example:
*
* <code>
* -Dleveldb.snappy=xerial,iq80
* </code>
*
* The system property can also be configured with the name of a class which
* implements the Snappy.SPI interface.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
package object leveldb {
final val Snappy = {
var attempt:SnappyTrait = null
System.getProperty("leveldb.snappy", "iq80,xerial").split(",").foreach { x =>
if( attempt==null ) {
try {
var name = x.trim();
name = name.toLowerCase match {
case "xerial" => "org.apache.activemq.leveldb.XerialSnappy"
case "iq80" => "org.apache.activemq.leveldb.IQ80Snappy"
case _ => name
}
attempt = Thread.currentThread().getContextClassLoader().loadClass(name).newInstance().asInstanceOf[SnappyTrait];
attempt.compress("test")
} catch {
case x:Throwable =>
attempt = null
}
}
}
attempt
}
trait SnappyTrait {
def uncompressed_length(input: Buffer):Int
def uncompress(input: Buffer, output:Buffer): Int
def max_compressed_length(length: Int): Int
def compress(input: Buffer, output: Buffer): Int
def compress(input: Buffer):Buffer = {
val compressed = new Buffer(max_compressed_length(input.length))
compressed.length = compress(input, compressed)
compressed
}
def compress(text: String): Buffer = {
val uncompressed = new Buffer(text.getBytes("UTF-8"))
val compressed = new Buffer(max_compressed_length(uncompressed.length))
compressed.length = compress(uncompressed, compressed)
return compressed
}
def uncompress(input: Buffer):Buffer = {
val uncompressed = new Buffer(uncompressed_length(input))
uncompressed.length = uncompress(input, uncompressed)
uncompressed
}
def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer): Int = {
val input = if (compressed.hasArray) {
new Buffer(compressed.array, compressed.arrayOffset + compressed.position, compressed.remaining)
} else {
val t = new Buffer(compressed.remaining)
compressed.mark
compressed.get(t.data)
compressed.reset
t
}
val output = if (uncompressed.hasArray) {
new Buffer(uncompressed.array, uncompressed.arrayOffset + uncompressed.position, uncompressed.capacity()-uncompressed.position)
} else {
new Buffer(uncompressed_length(input))
}
output.length = uncompress(input, output)
if (uncompressed.hasArray) {
uncompressed.limit(uncompressed.position + output.length)
} else {
val p = uncompressed.position
uncompressed.limit(uncompressed.capacity)
uncompressed.put(output.data, output.offset, output.length)
uncompressed.flip.position(p)
}
return output.length
}
}
}
package leveldb {
class XerialSnappy extends SnappyTrait {
override def uncompress(compressed: ByteBuffer, uncompressed: ByteBuffer) = Xerial.uncompress(compressed, uncompressed)
def uncompressed_length(input: Buffer) = Xerial.uncompressedLength(input.data, input.offset, input.length)
def uncompress(input: Buffer, output: Buffer) = Xerial.uncompress(input.data, input.offset, input.length, output.data, output.offset)
def max_compressed_length(length: Int) = Xerial.maxCompressedLength(length)
def compress(input: Buffer, output: Buffer) = Xerial.compress(input.data, input.offset, input.length, output.data, output.offset)
override def compress(text: String) = new Buffer(Xerial.compress(text))
}
class IQ80Snappy extends SnappyTrait {
def uncompressed_length(input: Buffer) = Iq80.getUncompressedLength(input.data, input.offset)
def uncompress(input: Buffer, output: Buffer): Int = Iq80.uncompress(input.data, input.offset, input.length, output.data, output.offset)
def compress(input: Buffer, output: Buffer): Int = Iq80.compress(input.data, input.offset, input.length, output.data, output.offset)
def max_compressed_length(length: Int) = Iq80.maxCompressedLength(length)
}
}
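// A minimal round-trip sketch of the abstraction above, assuming one of the two
// implementations loads successfully (Snappy below is the package-object val):
//
//   import org.apache.activemq.leveldb.Snappy
//   val payload    = "payload payload payload"
//   val compressed = Snappy.compress(payload)
//   val restored   = Snappy.uncompress(compressed)
//   assert(restored.utf8().toString == payload)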

View File

@ -1,907 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import org.fusesource.hawtdispatch._
import org.fusesource.hawtdispatch.BaseRetained
import java.util.concurrent._
import atomic._
import org.fusesource.hawtbuf.Buffer
import org.apache.activemq.store.{ListenableFuture, MessageRecoveryListener}
import java.lang.ref.WeakReference
import scala.Option._
import org.fusesource.hawtbuf.Buffer._
import org.apache.activemq.command._
import org.apache.activemq.leveldb.record.{SubscriptionRecord, CollectionRecord}
import java.util.HashMap
import collection.mutable.{HashSet, ListBuffer}
import org.apache.activemq.util.ByteSequence
import util.TimeMetric
import scala.Some
import org.apache.activemq.ActiveMQMessageAuditNoSync
import org.fusesource.hawtdispatch
import org.apache.activemq.broker.SuppressReplyException
case class EntryLocator(qid:Long, seq:Long)
case class DataLocator(store:LevelDBStore, pos:Long, len:Int) {
override def toString: String = "DataLocator(%x, %d)".format(pos, len)
}
case class MessageRecord(store:LevelDBStore, id:MessageId, data:Buffer, syncNeeded:Boolean) {
var locator:DataLocator = _
}
case class QueueEntryRecord(id:MessageId, queueKey:Long, queueSeq:Long, deliveries:Int=0)
case class QueueRecord(id:ActiveMQDestination, queue_key:Long)
case class QueueEntryRange()
case class SubAckRecord(subKey:Long, ackPosition:Long)
case class XaAckRecord(container:Long, seq:Long, ack:MessageAck, sub:Long = -1)
sealed trait UowState {
def stage:Int
}
// UoW is initially open.
object UowOpen extends UowState {
override def stage = 0
override def toString = "UowOpen"
}
// UoW is Closed once the broker has finished creating it.
object UowClosed extends UowState {
override def stage = 1
override def toString = "UowClosed"
}
// UOW is delayed until we send it to get flushed.
object UowDelayed extends UowState {
override def stage = 2
override def toString = "UowDelayed"
}
object UowFlushQueued extends UowState {
override def stage = 3
override def toString = "UowFlushQueued"
}
object UowFlushing extends UowState {
override def stage = 4
override def toString = "UowFlushing"
}
// Then it moves on to be flushed. Flushed just
// means the message has been written to disk
// and out of memory
object UowFlushed extends UowState {
override def stage = 5
override def toString = "UowFlushed"
}
// Once completed then you know it has been synced to disk.
object UowCompleted extends UowState {
override def stage = 6
override def toString = "UowCompleted"
}
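// Lifecycle summary: UowOpen -> UowClosed -> UowDelayed -> UowFlushQueued ->
// UowFlushing -> UowFlushed -> UowCompleted. DelayableUOW.state_= (below)
// asserts that a UoW only ever advances through these stages.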
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class CountDownFuture[T <: AnyRef]() extends ListenableFuture[T] {
private val latch:CountDownLatch=new CountDownLatch(1)
@volatile
var value:T = _
var error:Throwable = _
var listener:Runnable = _
def cancel(mayInterruptIfRunning: Boolean) = false
def isCancelled = false
def completed = latch.getCount()==0
def await() = latch.await()
def await(p1: Long, p2: TimeUnit) = latch.await(p1, p2)
def set(v:T) = {
value = v
latch.countDown()
fireListener
}
def failed(v:Throwable) = {
error = v
latch.countDown()
fireListener
}
def get() = {
latch.await()
if( error!=null ) {
throw error;
}
value
}
def get(p1: Long, p2: TimeUnit) = {
if(latch.await(p1, p2)) {
if( error!=null ) {
throw error;
}
value
} else {
throw new TimeoutException
}
}
def isDone = latch.await(0, TimeUnit.SECONDS);
def fireListener = {
if (listener != null) {
try {
listener.run()
} catch {
case e : Throwable => {
LevelDBStore.warn(e, "unexpected exception on future listener " +listener)
}
}
}
}
def addListener(l: Runnable) = {
listener = l
if (isDone) {
fireListener
}
}
}
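// Usage sketch (names from the class above): one thread completes the future
// while another blocks on get(); the listener fires once on completion.
//
//   val f = new CountDownFuture[String]()
//   f.addListener(new Runnable { def run() = println("completed") })
//   new Thread(new Runnable { def run() = f.set("done") }).start()
//   assert(f.get() == "done")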
object UowManagerConstants {
val QUEUE_COLLECTION_TYPE = 1
val TOPIC_COLLECTION_TYPE = 2
val TRANSACTION_COLLECTION_TYPE = 3
val SUBSCRIPTION_COLLECTION_TYPE = 4
case class QueueEntryKey(queue:Long, seq:Long)
def key(x:QueueEntryRecord) = QueueEntryKey(x.queueKey, x.queueSeq)
}
import UowManagerConstants._
class DelayableUOW(val manager:DBManager) extends BaseRetained {
val countDownFuture = new CountDownFuture[AnyRef]()
var canceled = false;
val uowId:Int = manager.lastUowId.incrementAndGet()
var actions = Map[MessageId, MessageAction]()
var subAcks = ListBuffer[SubAckRecord]()
var completed = false
var disableDelay = false
var delayableActions = 0
private var _state:UowState = UowOpen
def state = this._state
def state_=(next:UowState) {
assert(this._state.stage < next.stage)
this._state = next
}
var syncFlag = false
def syncNeeded = syncFlag || actions.find( _._2.syncNeeded ).isDefined
def size = 100+actions.foldLeft(0L){ case (sum, entry) =>
sum + (entry._2.size+100)
} + (subAcks.size * 100)
class MessageAction {
var id:MessageId = _
var messageRecord: MessageRecord = null
var enqueues = ListBuffer[QueueEntryRecord]()
var dequeues = ListBuffer[QueueEntryRecord]()
var xaAcks = ListBuffer[XaAckRecord]()
def uow = DelayableUOW.this
def isEmpty() = messageRecord==null && enqueues.isEmpty && dequeues.isEmpty && xaAcks.isEmpty
def cancel() = {
uow.rm(id)
}
def syncNeeded = messageRecord!=null && messageRecord.syncNeeded
def size = (if(messageRecord!=null) messageRecord.data.length+20 else 0) + ((enqueues.size+dequeues.size)*50) + xaAcks.foldLeft(0L){ case (sum, entry) =>
sum + 100
}
def addToPendingStore() = {
var set = manager.pendingStores.get(id)
if(set==null) {
set = HashSet()
manager.pendingStores.put(id, set)
}
set.add(this)
}
def removeFromPendingStore() = {
var set = manager.pendingStores.get(id)
if(set!=null) {
set.remove(this)
if(set.isEmpty) {
manager.pendingStores.remove(id)
}
}
}
}
def completeAsap() = this.synchronized { disableDelay=true }
def delayable = !disableDelay && delayableActions>0 && manager.flushDelay>0
def rm(msg:MessageId) = {
actions -= msg
if( actions.isEmpty && state.stage < UowFlushing.stage ) {
cancel
}
}
def cancel = {
manager.dispatchQueue.assertExecuting()
manager.uowCanceledCounter += 1
canceled = true
manager.flush_queue.remove(uowId)
onCompleted()
}
def getAction(id:MessageId) = {
actions.get(id) match {
case Some(x) => x
case None =>
val x = new MessageAction
x.id = id
actions += id->x
x
}
}
def updateAckPosition(sub_key:Long, ack_seq:Long) = {
subAcks += SubAckRecord(sub_key, ack_seq)
}
def xaAck(record:XaAckRecord) = {
this.synchronized {
getAction(record.ack.getLastMessageId).xaAcks+=record
}
countDownFuture
}
def enqueue(queueKey:Long, queueSeq:Long, message:Message, delay_enqueue:Boolean) = {
var delay = delay_enqueue && message.getTransactionId==null
if(delay ) {
manager.uowEnqueueDelayReqested += 1
} else {
manager.uowEnqueueNodelayReqested += 1
}
val id = message.getMessageId
def create_message_record: MessageRecord = {
// encodes the body and releases object bodies, in case the message was sent from
// a VM connection. Releases additional memory.
message.storeContentAndClear()
var packet = manager.parent.wireFormat.marshal(message)
var data = new Buffer(packet.data, packet.offset, packet.length)
if (manager.snappyCompressLogs) {
data = Snappy.compress(data)
}
val record = MessageRecord(manager.parent, id, data, message.isResponseRequired)
id.setDataLocator(record)
record
}
val messageRecord = id.getDataLocator match {
case null =>
create_message_record
case record:MessageRecord =>
if( record.store == manager.parent ) {
record
} else {
create_message_record
}
case x:DataLocator =>
if( x.store == manager.parent ) {
null
} else {
create_message_record
}
}
val entry = QueueEntryRecord(id, queueKey, queueSeq)
assert(id.getEntryLocator == null)
id.setEntryLocator(EntryLocator(queueKey, queueSeq))
val a = this.synchronized {
if( !delay )
disableDelay = true
val action = getAction(entry.id)
action.messageRecord = messageRecord
action.enqueues += entry
delayableActions += 1
action
}
manager.dispatchQueue {
manager.cancelable_enqueue_actions.put(key(entry), a)
a.addToPendingStore()
}
countDownFuture
}
def incrementRedelivery(expectedQueueKey:Long, id:MessageId) = {
if( id.getEntryLocator != null ) {
val EntryLocator(queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[EntryLocator];
assert(queueKey == expectedQueueKey)
val counter = manager.client.getDeliveryCounter(queueKey, queueSeq)
val entry = QueueEntryRecord(id, queueKey, queueSeq, counter+1)
val a = this.synchronized {
val action = getAction(entry.id)
action.enqueues += entry
delayableActions += 1
action
}
manager.dispatchQueue {
manager.cancelable_enqueue_actions.put(key(entry), a)
a.addToPendingStore()
}
}
countDownFuture
}
def dequeue(expectedQueueKey:Long, id:MessageId) = {
if( id.getEntryLocator != null ) {
val EntryLocator(queueKey, queueSeq) = id.getEntryLocator.asInstanceOf[EntryLocator];
assert(queueKey == expectedQueueKey)
val entry = QueueEntryRecord(id, queueKey, queueSeq)
this.synchronized {
getAction(id).dequeues += entry
}
}
countDownFuture
}
def complete_asap = this.synchronized {
disableDelay=true
if( state eq UowDelayed ) {
manager.enqueueFlush(this)
}
}
var complete_listeners = ListBuffer[()=>Unit]()
def addCompleteListener(func: =>Unit) = {
complete_listeners.append( func _ )
}
var asyncCapacityUsed = 0L
var disposed_at = 0L
override def dispose = this.synchronized {
state = UowClosed
disposed_at = System.nanoTime()
if( !syncNeeded ) {
val s = size
if( manager.asyncCapacityRemaining.addAndGet(-s) > 0 ) {
asyncCapacityUsed = s
complete_listeners.foreach(_())
} else {
manager.asyncCapacityRemaining.addAndGet(s)
}
}
// closeSource.merge(this)
manager.dispatchQueue {
manager.processClosed(this)
}
}
def onCompleted(error:Throwable=null) = this.synchronized {
if ( state.stage < UowCompleted.stage ) {
state = UowCompleted
if( asyncCapacityUsed != 0 ) {
manager.asyncCapacityRemaining.addAndGet(asyncCapacityUsed)
asyncCapacityUsed = 0
} else {
manager.uow_complete_latency.add(System.nanoTime() - disposed_at)
complete_listeners.foreach(_())
}
if( error == null ) {
countDownFuture.set(null)
} else {
countDownFuture.failed(error)
}
for( (id, action) <- actions ) {
if( !action.enqueues.isEmpty ) {
action.removeFromPendingStore()
}
for( queueEntry <- action.enqueues ) {
manager.cancelable_enqueue_actions.remove(key(queueEntry))
}
}
super.dispose
}
}
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DBManager(val parent:LevelDBStore) {
var lastCollectionKey = new AtomicLong(0)
var lastPListKey = new AtomicLong(0)
def client = parent.client
def writeExecutor = client.writeExecutor
def flushDelay = parent.flushDelay
val dispatchQueue = createQueue(toString)
// val aggregator = new AggregatingExecutor(dispatchQueue)
val asyncCapacityRemaining = new AtomicLong(0L)
def createUow() = new DelayableUOW(this)
var uowEnqueueDelayReqested = 0L
var uowEnqueueNodelayReqested = 0L
var uowClosedCounter = 0L
var uowCanceledCounter = 0L
var uowStoringCounter = 0L
var uowStoredCounter = 0L
val uow_complete_latency = TimeMetric()
// val closeSource = createSource(new ListEventAggregator[DelayableUOW](), dispatchQueue)
// closeSource.setEventHandler(^{
// closeSource.getData.foreach { uow =>
// processClosed(uow)
// }
// });
// closeSource.resume
var pendingStores = new ConcurrentHashMap[MessageId, HashSet[DelayableUOW#MessageAction]]()
var cancelable_enqueue_actions = new HashMap[QueueEntryKey, DelayableUOW#MessageAction]()
val lastUowId = new AtomicInteger(1)
var producerSequenceIdTracker = new ActiveMQMessageAuditNoSync
def getLastProducerSequenceId(id: ProducerId): Long = dispatchQueue.sync {
producerSequenceIdTracker.getLastSeqId(id)
}
def processClosed(uow:DelayableUOW) = {
dispatchQueue.assertExecuting()
uowClosedCounter += 1
// Broker could issue a flush_message call before
// this stage runs.. which makes the stage jump over UowDelayed
if( uow.state.stage < UowDelayed.stage ) {
uow.state = UowDelayed
}
if( uow.state.stage < UowFlushing.stage ) {
uow.actions.foreach { case (id, action) =>
// The UoW may have been canceled.
if( action.messageRecord!=null && action.enqueues.isEmpty ) {
action.removeFromPendingStore()
action.messageRecord = null
uow.delayableActions -= 1
}
if( action.isEmpty ) {
action.cancel()
}
// dequeues can cancel out previous enqueues
action.dequeues.foreach { entry=>
val entry_key = key(entry)
val prev_action:DelayableUOW#MessageAction = cancelable_enqueue_actions.remove(entry_key)
if( prev_action!=null ) {
val prev_uow = prev_action.uow
prev_uow.synchronized {
if( !prev_uow.canceled ) {
prev_uow.delayableActions -= 1
// yay we can cancel out a previous enqueue
prev_action.enqueues = prev_action.enqueues.filterNot( x=> key(x) == entry_key )
if( prev_uow.state.stage >= UowDelayed.stage ) {
// if the message is not in any queues.. we can gc it..
if( prev_action.enqueues == Nil && prev_action.messageRecord !=null ) {
prev_action.removeFromPendingStore()
prev_action.messageRecord = null
prev_uow.delayableActions -= 1
}
// Cancel the action if it's now empty
if( prev_action.isEmpty ) {
prev_action.cancel()
} else if( !prev_uow.delayable ) {
// flush it if there is no point in delaying anymore
prev_uow.complete_asap
}
}
}
}
// since we canceled out the previous enqueue.. now cancel out the action
action.dequeues = action.dequeues.filterNot( _ == entry)
if( action.isEmpty ) {
action.cancel()
}
}
}
}
}
if( !uow.canceled && uow.state.stage < UowFlushQueued.stage ) {
if( uow.delayable ) {
// Let the uow get GCed if it's canceled during the delay window..
val ref = new WeakReference[DelayableUOW](uow)
scheduleFlush(ref)
} else {
enqueueFlush(uow)
}
}
}
private def scheduleFlush(ref: WeakReference[DelayableUOW]) {
dispatchQueue.executeAfter(flushDelay, TimeUnit.MILLISECONDS, ^ {
val uow = ref.get();
if (uow != null) {
enqueueFlush(uow)
}
})
}
val flush_queue = new java.util.LinkedHashMap[Long, DelayableUOW]()
def enqueueFlush(uow:DelayableUOW) = {
dispatchQueue.assertExecuting()
if( uow!=null && !uow.canceled && uow.state.stage < UowFlushQueued.stage ) {
uow.state = UowFlushQueued
flush_queue.put (uow.uowId, uow)
flushSource.merge(1)
}
}
val flushSource = createSource(EventAggregators.INTEGER_ADD, dispatchQueue)
flushSource.setEventHandler(^{drainFlushes});
flushSource.resume
def drainFlushes:Unit = {
dispatchQueue.assertExecuting()
// Some UOWs may have been canceled.
import collection.JavaConversions._
val values = flush_queue.values().toSeq.toArray
flush_queue.clear()
val uows = values.flatMap { uow=>
if( uow.canceled ) {
None
} else {
// It will not be possible to cancel the UOW anymore..
uow.state = UowFlushing
uow.actions.foreach { case (_, action) =>
action.enqueues.foreach { queue_entry=>
val action = cancelable_enqueue_actions.remove(key(queue_entry))
assert(action!=null)
}
}
if( !started ) {
uow.onCompleted(new SuppressReplyException("Store stopped"))
None
} else {
Some(uow)
}
}
}
if( !uows.isEmpty ) {
uowStoringCounter += uows.size
flushSource.suspend
writeExecutor {
val e = try {
client.store(uows)
null
} catch {
case e:Throwable => e
}
flushSource.resume
dispatchQueue {
uowStoredCounter += uows.size
uows.foreach { uow=>
uow.onCompleted(e)
}
}
}
}
}
var started = false
def snappyCompressLogs = parent.snappyCompressLogs
def start = {
asyncCapacityRemaining.set(parent.asyncBufferSize)
client.start()
dispatchQueue.sync {
started = true
pollGc
if(parent.monitorStats) {
monitorStats
}
}
}
def stop() = {
dispatchQueue.sync {
started = false
}
client.stop()
}
def pollGc:Unit = dispatchQueue.after(10, TimeUnit.SECONDS) {
if( started ) {
val positions = parent.getTopicGCPositions
writeExecutor {
if( started ) {
client.gc(positions)
pollGc
}
}
}
}
def monitorStats:Unit = dispatchQueue.after(1, TimeUnit.SECONDS) {
if( started ) {
println(("committed: %d, canceled: %d, storing: %d, stored: %d, " +
"uow complete: %,.3f ms, " +
"index write: %,.3f ms, " +
"log write: %,.3f ms, log flush: %,.3f ms, log rotate: %,.3f ms"+
"add msg: %,.3f ms, add enqueue: %,.3f ms, " +
"uowEnqueueDelayReqested: %d, uowEnqueueNodelayReqested: %d "
).format(
uowClosedCounter, uowCanceledCounter, uowStoringCounter, uowStoredCounter,
uow_complete_latency.reset,
client.max_index_write_latency.reset,
client.log.max_log_write_latency.reset, client.log.max_log_flush_latency.reset, client.log.max_log_rotate_latency.reset,
client.max_write_message_latency.reset, client.max_write_enqueue_latency.reset,
uowEnqueueDelayReqested, uowEnqueueNodelayReqested
))
uowClosedCounter = 0
// uowCanceledCounter = 0
uowStoringCounter = 0
uowStoredCounter = 0
monitorStats
}
}
/////////////////////////////////////////////////////////////////////
//
// Implementation of the Store interface
//
/////////////////////////////////////////////////////////////////////
def checkpoint(sync:Boolean) = writeExecutor.sync {
client.snapshotIndex(sync)
}
def purge = writeExecutor.sync {
client.purge
lastCollectionKey.set(1)
}
def getLastQueueEntrySeq(key:Long) = {
client.getLastQueueEntrySeq(key)
}
def collectionEmpty(key:Long) = writeExecutor.sync {
client.collectionEmpty(key)
}
def collectionSize(key:Long) = {
client.collectionSize(key)
}
def collectionIsEmpty(key:Long) = {
client.collectionIsEmpty(key)
}
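// Replays messages of collection `key` to the listener starting at startPos and
// returns the position to resume from: one past the sequence of the last
// recovered message, or startPos when nothing was recovered.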
def cursorMessages(preparedAcks:java.util.HashSet[MessageId], key:Long, listener:MessageRecoveryListener, startPos:Long, endPos:Long=Long.MaxValue, max:Long=Long.MaxValue) = {
var lastmsgid:MessageId = null
var count = 0L
client.queueCursor(key, startPos, endPos) { msg =>
if( !preparedAcks.contains(msg.getMessageId) && listener.recoverMessage(msg) ) {
lastmsgid = msg.getMessageId
count += 1
}
count < max && listener.canRecoveryNextMessage
}
if( lastmsgid==null ) {
startPos
} else {
lastmsgid.getEntryLocator.asInstanceOf[EntryLocator].seq+1
}
}
def getXAActions(key:Long) = {
val msgs = ListBuffer[Message]()
val acks = ListBuffer[XaAckRecord]()
client.transactionCursor(key) { command =>
command match {
case message:Message => msgs += message
case record:XaAckRecord => acks += record
}
true
}
(msgs, acks)
}
def queuePosition(id: MessageId):Long = {
id.getEntryLocator.asInstanceOf[EntryLocator].seq
}
def createQueueStore(dest:ActiveMQQueue):LevelDBStore#LevelDBMessageStore = {
parent.createQueueMessageStore(dest, createCollection(utf8(dest.getQualifiedName), QUEUE_COLLECTION_TYPE))
}
def destroyQueueStore(key:Long) = writeExecutor.sync {
client.removeCollection(key)
}
def getLogAppendPosition = writeExecutor.sync {
client.getLogAppendPosition
}
def addSubscription(topic_key:Long, info:SubscriptionInfo):DurableSubscription = {
val record = new SubscriptionRecord.Bean
record.setTopicKey(topic_key)
record.setClientId(info.getClientId)
record.setSubscriptionName(info.getSubscriptionName)
if( info.getSelector!=null ) {
record.setSelector(info.getSelector)
}
if( info.getDestination!=null ) {
record.setDestinationName(info.getDestination.getQualifiedName)
}
if ( info.getSubscribedDestination!=null) {
record.setSubscribedDestinationName(info.getSubscribedDestination.getQualifiedName)
}
val collection = new CollectionRecord.Bean()
collection.setType(SUBSCRIPTION_COLLECTION_TYPE)
collection.setKey(lastCollectionKey.incrementAndGet())
collection.setMeta(record.freeze().toUnframedBuffer)
val buffer = collection.freeze()
buffer.toFramedBuffer // eager encode the record.
writeExecutor.sync {
client.addCollection(buffer)
}
DurableSubscription(collection.getKey, topic_key, info)
}
def removeSubscription(sub:DurableSubscription) {
writeExecutor.sync {
client.removeCollection(sub.subKey)
}
}
def createTopicStore(dest:ActiveMQTopic) = {
var key = createCollection(utf8(dest.getQualifiedName), TOPIC_COLLECTION_TYPE)
parent.createTopicMessageStore(dest, key)
}
def createCollection(name:Buffer, collectionType:Int) = {
val collection = new CollectionRecord.Bean()
collection.setType(collectionType)
collection.setMeta(name)
collection.setKey(lastCollectionKey.incrementAndGet())
val buffer = collection.freeze()
buffer.toFramedBuffer // eager encode the record.
writeExecutor.sync {
client.addCollection(buffer)
}
collection.getKey
}
def buffer(packet:ByteSequence) = new Buffer(packet.data, packet.offset, packet.length)
def createTransactionContainer(id:XATransactionId) =
createCollection(buffer(parent.wireFormat.marshal(id)), TRANSACTION_COLLECTION_TYPE)
def removeTransactionContainer(key:Long) = writeExecutor.sync {
client.removeCollection(key)
}
def loadCollections = {
val collections = writeExecutor.sync {
client.listCollections
}
var last = 0L
collections.foreach { case (key, record) =>
last = key
record.getType match {
case QUEUE_COLLECTION_TYPE =>
val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.QUEUE_TYPE).asInstanceOf[ActiveMQQueue]
parent.createQueueMessageStore(dest, key)
case TOPIC_COLLECTION_TYPE =>
val dest = ActiveMQDestination.createDestination(record.getMeta.utf8().toString, ActiveMQDestination.TOPIC_TYPE).asInstanceOf[ActiveMQTopic]
parent.createTopicMessageStore(dest, key)
case SUBSCRIPTION_COLLECTION_TYPE =>
val sr = SubscriptionRecord.FACTORY.parseUnframed(record.getMeta)
val info = new SubscriptionInfo
info.setClientId(sr.getClientId)
info.setSubscriptionName(sr.getSubscriptionName)
if( sr.hasSelector ) {
info.setSelector(sr.getSelector)
}
if( sr.hasDestinationName ) {
info.setDestination(ActiveMQDestination.createDestination(sr.getDestinationName, ActiveMQDestination.TOPIC_TYPE))
}
if( sr.hasSubscribedDestinationName ) {
info.setSubscribedDestination(ActiveMQDestination.createDestination(sr.getSubscribedDestinationName, ActiveMQDestination.TOPIC_TYPE))
}
var sub = DurableSubscription(key, sr.getTopicKey, info)
sub.lastAckPosition = client.getAckPosition(key);
sub.gcPosition = sub.lastAckPosition
parent.createSubscription(sub)
case TRANSACTION_COLLECTION_TYPE =>
val meta = record.getMeta
val txid = parent.wireFormat.unmarshal(new ByteSequence(meta.data, meta.offset, meta.length)).asInstanceOf[XATransactionId]
val transaction = parent.transaction(txid)
transaction.xacontainer_id = key
case _ =>
}
}
lastCollectionKey.set(last)
}
def createPList(name:String):LevelDBStore#LevelDBPList = {
parent.createPList(name, lastPListKey.incrementAndGet())
}
def destroyPList(key:Long) = writeExecutor.sync {
client.removePlist(key)
}
def plistPut(key:Array[Byte], value:Array[Byte]) = client.plistPut(key, value)
def plistGet(key:Array[Byte]) = client.plistGet(key)
def plistDelete(key:Array[Byte]) = client.plistDelete(key)
def plistIterator = client.plistIterator
def getMessage(x: MessageId):Message = {
val id = Option(pendingStores.get(x)).flatMap(_.headOption).map(_.id).getOrElse(x)
val locator = id.getDataLocator()
val msg = client.getMessage(locator)
if( msg!=null ) {
msg.setMessageId(id)
} else {
LevelDBStore.warn("Could not load messages for: "+x+" at: "+locator)
}
msg
}
}

View File

@ -1,663 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb
import java.{lang=>jl}
import java.{util=>ju}
import java.util.zip.CRC32
import java.util.Map.Entry
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import java.io._
import org.fusesource.hawtbuf.{DataByteArrayInputStream, DataByteArrayOutputStream, Buffer}
import org.fusesource.hawtdispatch.BaseRetained
import org.apache.activemq.leveldb.util.FileSupport._
import org.apache.activemq.util.LRUCache
import util.TimeMetric._
import util.{TimeMetric, Log}
import java.util.TreeMap
import java.util.concurrent.locks.{ReentrantReadWriteLock, ReadWriteLock}
import java.util.concurrent.CountDownLatch
object RecordLog extends Log {
// The log files contain a sequence of variable length log records:
// record := header + data
//
// header :=
// '*' : int8 // Start of Record Magic
// kind : int8 // Help identify content type of the data.
// checksum : uint32 // crc32 of the data[]
// length : uint32 // the length of the data
val LOG_HEADER_PREFIX = '*'.toByte
val UOW_END_RECORD = -1.toByte
val LOG_HEADER_SIZE = 10
val BUFFER_SIZE = 1024*512
val BYPASS_BUFFER_SIZE = 1024*16
case class LogInfo(file:File, position:Long, length:Long) {
def limit = position+length
}
def encode_long(a1:Long) = {
val out = new DataByteArrayOutputStream(8)
out.writeLong(a1)
out.toBuffer
}
def decode_long(value:Buffer):Long = {
val in = new DataByteArrayInputStream(value)
in.readLong()
}
}
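// A minimal sketch of the record layout documented above (hawtbuf + JDK only);
// it mirrors what LogAppender.append writes, minus the buffering:
//
//   def encodeRecord(kind: Byte, data: Buffer): Buffer = {
//     val crc = new CRC32
//     crc.update(data.data, data.offset, data.length)
//     val out = new DataByteArrayOutputStream(RecordLog.LOG_HEADER_SIZE + data.length)
//     out.writeByte(RecordLog.LOG_HEADER_PREFIX)      // '*' start-of-record magic
//     out.writeByte(kind)                             // content kind
//     out.writeInt(crc.getValue.toInt)                // crc32 of the data
//     out.writeInt(data.length)                       // data length
//     out.write(data.data, data.offset, data.length)  // the data itself
//     out.toBuffer                                    // 10-byte header + data
//   }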
class SuspendCallSupport {
val lock = new ReentrantReadWriteLock()
var resumeLatch:CountDownLatch = _
var resumedLatch:CountDownLatch = _
@volatile
var threads = new AtomicInteger()
def suspend = this.synchronized {
val suspended = new CountDownLatch(1)
resumeLatch = new CountDownLatch(1)
resumedLatch = new CountDownLatch(1)
new Thread("Suspend Lock") {
override def run = {
try {
lock.writeLock().lock()
suspended.countDown()
resumeLatch.await()
} finally {
lock.writeLock().unlock();
resumedLatch.countDown()
}
}
}.start()
suspended.await()
}
def resume = this.synchronized {
if( resumedLatch != null ) {
resumeLatch.countDown()
resumedLatch.await();
resumeLatch = null
resumedLatch = null
}
}
def call[T](func: =>T):T= {
threads.incrementAndGet()
lock.readLock().lock()
try {
func
} finally {
threads.decrementAndGet()
lock.readLock().unlock()
}
}
}
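// Usage sketch (test support only): suspend takes the write lock on a helper
// thread, so call{} bodies block until resume releases it.
//
//   val gate = new SuspendCallSupport()
//   gate.suspend
//   val t = new Thread(new Runnable {
//     def run() = gate.call { println("runs only after resume") }
//   })
//   t.start()
//   gate.resume
//   t.join()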
class RecordLogTestSupport {
val forceCall = new SuspendCallSupport()
val writeCall = new SuspendCallSupport()
val deleteCall = new SuspendCallSupport()
}
case class RecordLog(directory: File, logSuffix:String) {
import RecordLog._
directory.mkdirs()
var logSize = 1024 * 1024 * 100L
var current_appender:LogAppender = _
var verify_checksums = false
val log_infos = new TreeMap[Long, LogInfo]()
var recordLogTestSupport:RecordLogTestSupport =
if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) {
new RecordLogTestSupport()
} else {
null
}
object log_mutex
def delete(id:Long) = {
log_mutex.synchronized {
// We can't delete the current appender.
if( current_appender.position != id ) {
Option(log_infos.get(id)).foreach { info =>
onDelete(info.file)
onDelete(id)
log_infos.remove(id)
reader_cache.synchronized {
val reader = reader_cache.remove(info.file);
if( reader!=null ) {
reader.release();
}
}
}
}
}
}
protected def onDelete(file:Long) = {
}
protected def onDelete(file:File) = {
if( recordLogTestSupport!=null ) {
recordLogTestSupport.deleteCall.call {
file.delete()
}
} else {
file.delete()
}
}
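// CRC32.getValue returns the checksum as a Long in [0, 2^32); the masked
// .toInt below reinterprets it as a signed 32-bit value so it fits the
// 4-byte header field.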
def checksum(data: Buffer): Int = {
val checksum = new CRC32
checksum.update(data.data, data.offset, data.length)
(checksum.getValue & 0xFFFFFFFF).toInt
}
class LogAppender(file:File, position:Long, var append_offset:Long=0L) extends LogReader(file, position) {
val info = new LogInfo(file, position, 0)
override def open = new RandomAccessFile(file, "rw")
override def on_close ={
force
}
val flushed_offset = new AtomicLong(append_offset)
def append_position = {
position+append_offset
}
// set the file size ahead of time so that we don't have to sync the file
// meta-data on every log sync.
if( append_offset==0 ) {
channel.position(logSize-1)
channel.write(new Buffer(1).toByteBuffer)
channel.force(true)
channel.position(0)
}
val write_buffer = new DataByteArrayOutputStream(BUFFER_SIZE+LOG_HEADER_SIZE)
def force = {
flush
max_log_flush_latency {
// only need to update the file metadata if the file size changes..
if( recordLogTestSupport!=null ) {
recordLogTestSupport.forceCall.call {
channel.force(append_offset > logSize)
}
} else {
channel.force(append_offset > logSize)
}
}
}
def skip(length:Long) = this.synchronized {
flush
append_offset += length
flushed_offset.addAndGet(length)
}
/**
* returns the offset position of the data record.
*/
def append(id:Byte, data: Buffer) = this.synchronized {
val record_position = append_position
val data_length = data.length
val total_length = LOG_HEADER_SIZE + data_length
if( write_buffer.position() + total_length > BUFFER_SIZE ) {
flush
}
val cs: Int = checksum(data)
// trace("Writing at: "+record_position+" len: "+data_length+" with checksum: "+cs)
if( false && total_length > BYPASS_BUFFER_SIZE ) {
// Write the header and flush..
write_buffer.writeByte(LOG_HEADER_PREFIX)
write_buffer.writeByte(id)
write_buffer.writeInt(cs)
write_buffer.writeInt(data_length)
append_offset += LOG_HEADER_SIZE
flush
// Directly write the data to the channel since it's large.
val buffer = data.toByteBuffer
val pos = append_offset+LOG_HEADER_SIZE
val remaining = buffer.remaining
if( recordLogTestSupport!=null ) {
recordLogTestSupport.writeCall.call {
channel.write(buffer, pos)
}
} else {
channel.write(buffer, pos)
}
flushed_offset.addAndGet(remaining)
if( buffer.hasRemaining ) {
throw new IOException("Short write")
}
append_offset += data_length
} else {
write_buffer.writeByte(LOG_HEADER_PREFIX)
write_buffer.writeByte(id)
write_buffer.writeInt(cs)
write_buffer.writeInt(data_length)
write_buffer.write(data.data, data.offset, data_length)
append_offset += total_length
}
(record_position, info)
}
def flush = max_log_flush_latency { this.synchronized {
if( write_buffer.position() > 0 ) {
val buffer = write_buffer.toBuffer.toByteBuffer
val remaining = buffer.remaining
val pos = append_offset-remaining
if( recordLogTestSupport!=null ) {
recordLogTestSupport.writeCall.call {
channel.write(buffer, pos)
}
} else {
channel.write(buffer, pos)
}
flushed_offset.addAndGet(remaining)
if( buffer.hasRemaining ) {
throw new IOException("Short write")
}
write_buffer.reset()
} }
}
override def check_read_flush(end_offset:Long) = {
if( flushed_offset.get() < end_offset ) {
flush
}
}
}
case class LogReader(file:File, position:Long) extends BaseRetained {
def open = new RandomAccessFile(file, "r")
val fd = open
val channel = fd.getChannel
override def dispose() {
on_close
fd.close()
}
def on_close = {}
def check_read_flush(end_offset:Long) = {}
def read(record_position:Long, length:Int) = {
val offset = record_position-position
assert(offset >=0 )
check_read_flush(offset+LOG_HEADER_SIZE+length)
if(verify_checksums) {
val record = new Buffer(LOG_HEADER_SIZE+length)
def record_is_not_changing = {
using(open) { fd =>
val channel = fd.getChannel
val new_record = new Buffer(LOG_HEADER_SIZE+length)
channel.read(new_record.toByteBuffer, offset)
var same = record == new_record
println(same)
same
}
}
if( channel.read(record.toByteBuffer, offset) != record.length ) {
assert( record_is_not_changing )
throw new IOException("short record at position: "+record_position+" in file: "+file+", offset: "+offset)
}
val is = new DataByteArrayInputStream(record)
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
assert(record_is_not_changing)
throw new IOException("invalid record at position: "+record_position+" in file: "+file+", offset: "+offset)
}
val id = is.readByte()
val expectedChecksum = is.readInt()
val expectedLength = is.readInt()
val data = is.readBuffer(length)
// If you're reading the whole record we can verify the data checksum
if( expectedLength == length ) {
if( expectedChecksum != checksum(data) ) {
assert(record_is_not_changing)
throw new IOException("checksum does not match at position: "+record_position+" in file: "+file+", offset: "+offset)
}
}
data
} else {
val data = new Buffer(length)
var bb = data.toByteBuffer
var position = offset+LOG_HEADER_SIZE
while( bb.hasRemaining ) {
var count = channel.read(bb, position)
if( count == 0 ) {
throw new IOException("zero read at file '%s' offset: %d".format(file, position))
}
if( count < 0 ) {
throw new EOFException("File '%s' offset: %d".format(file, position))
}
position += count
}
data
}
}
def read(record_position:Long) = {
val offset = record_position-position
val header = new Buffer(LOG_HEADER_SIZE)
check_read_flush(offset+LOG_HEADER_SIZE)
channel.read(header.toByteBuffer, offset)
val is = header.bigEndianEditor();
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
// Does not look like a record.
throw new IOException("invalid record position %d (file: %s, offset: %d)".format(record_position, file.getAbsolutePath, offset))
}
val id = is.readByte()
val expectedChecksum = is.readInt()
val length = is.readInt()
val data = new Buffer(length)
check_read_flush(offset+LOG_HEADER_SIZE+length)
if( channel.read(data.toByteBuffer, offset+LOG_HEADER_SIZE) != length ) {
throw new IOException("short record")
}
if(verify_checksums) {
if( expectedChecksum != checksum(data) ) {
throw new IOException("checksum does not match")
}
}
(id, data, record_position+LOG_HEADER_SIZE+length)
}
def check(record_position:Long):Option[(Long, Option[Long])] = {
var offset = record_position-position
val header = new Buffer(LOG_HEADER_SIZE)
channel.read(header.toByteBuffer, offset)
val is = header.bigEndianEditor();
val prefix = is.readByte()
if( prefix != LOG_HEADER_PREFIX ) {
return None // Does not look like a record.
}
val kind = is.readByte()
val expectedChecksum = is.readInt()
val length = is.readInt()
val chunk = new Buffer(1024*4)
val chunkbb = chunk.toByteBuffer
offset += LOG_HEADER_SIZE
// Read the data in chunks to avoid
// OOME if we are checking an invalid record
// with a bad record length
val checksumer = new CRC32
var remaining = length
while( remaining > 0 ) {
val chunkSize = remaining.min(1024*4);
chunkbb.position(0)
chunkbb.limit(chunkSize)
channel.read(chunkbb, offset)
if( chunkbb.hasRemaining ) {
return None
}
checksumer.update(chunk.data, 0, chunkSize)
offset += chunkSize
remaining -= chunkSize
}
val checksum = ( checksumer.getValue & 0xFFFFFFFF).toInt
if( expectedChecksum != checksum ) {
return None
}
val uow_start_pos = if(kind == UOW_END_RECORD && length==8) Some(decode_long(chunk)) else None
return Some(record_position+LOG_HEADER_SIZE+length, uow_start_pos)
}
def verifyAndGetEndOffset:Long = {
var pos = position;
var current_uow_start = pos
val limit = position+channel.size()
while(pos < limit) {
check(pos) match {
case Some((next, uow_start_pos)) =>
uow_start_pos.foreach { uow_start_pos =>
if( uow_start_pos == current_uow_start ) {
current_uow_start = next
} else {
return current_uow_start-position
}
}
pos = next
case None =>
return current_uow_start-position
}
}
return current_uow_start-position
}
}
def create_log_appender(position: Long, offset:Long) = {
new LogAppender(next_log(position), position, offset)
}
def create_appender(position: Long, offset:Long): Any = {
log_mutex.synchronized {
if(current_appender!=null) {
log_infos.put (position, new LogInfo(current_appender.file, current_appender.position, current_appender.append_offset))
}
current_appender = create_log_appender(position, offset)
log_infos.put(position, new LogInfo(current_appender.file, position, 0))
}
}
val max_log_write_latency = TimeMetric()
val max_log_flush_latency = TimeMetric()
val max_log_rotate_latency = TimeMetric()
def open(appender_size:Long= -1) = {
log_mutex.synchronized {
log_infos.clear()
LevelDBClient.find_sequence_files(directory, logSuffix).foreach { case (position,file) =>
log_infos.put(position, LogInfo(file, position, file.length()))
}
if( log_infos.isEmpty ) {
create_appender(0,0)
} else {
val file = log_infos.lastEntry().getValue
if( appender_size == -1 ) {
val r = LogReader(file.file, file.position)
try {
val endOffset = r.verifyAndGetEndOffset
using(new RandomAccessFile(file.file, "rw")) { file=>
try {
file.getChannel.truncate(endOffset)
}
catch {
case e:Throwable =>
e.printStackTrace()
}
file.getChannel.force(true)
}
create_appender(file.position,endOffset)
} finally {
r.release()
}
} else {
create_appender(file.position,appender_size)
}
}
}
}
def isOpen = {
log_mutex.synchronized {
current_appender!=null;
}
}
def close = {
log_mutex.synchronized {
if( current_appender!=null ) {
current_appender.release
}
}
}
def appender_limit = current_appender.append_position
def appender_start = current_appender.position
def next_log(position:Long) = LevelDBClient.create_sequence_file(directory, position, logSuffix)
def appender[T](func: (LogAppender)=>T):T= {
val intial_position = current_appender.append_position
try {
max_log_write_latency {
val rc = func(current_appender)
if( current_appender.append_position != intial_position ) {
// Record a UOW_END_RECORD so that on recovery we only replay full units of work.
current_appender.append(UOW_END_RECORD,encode_long(intial_position))
}
rc
}
} finally {
current_appender.flush
max_log_rotate_latency {
log_mutex.synchronized {
if ( current_appender.append_offset >= logSize ) {
rotate
}
}
}
}
}
def rotate[T] = log_mutex.synchronized {
current_appender.release()
on_log_rotate()
create_appender(current_appender.append_position, 0)
}
var on_log_rotate: ()=>Unit = ()=>{}
private val reader_cache = new LRUCache[File, LogReader](100) {
protected override def onCacheEviction(entry: Entry[File, LogReader]) = {
entry.getValue.release()
}
}
def log_info(pos:Long) = log_mutex.synchronized { Option(log_infos.floorEntry(pos)).map(_.getValue) }
def log_file_positions = log_mutex.synchronized {
import collection.JavaConversions._
log_infos.map(_._2.position).toArray
}
private def get_reader[T](record_position:Long)(func: (LogReader)=>T):Option[T] = {
val (info, appender) = log_mutex.synchronized {
log_info(record_position) match {
case None =>
warn("No reader available for position: %x, log_infos: %s", record_position, log_infos)
return None
case Some(info) =>
if(info.position == current_appender.position) {
current_appender.retain()
(info, current_appender)
} else {
(info, null)
}
}
}
val reader = if( appender!=null ) {
// read from the current appender.
appender
} else {
// Check out a reader from the cache...
reader_cache.synchronized {
var reader = reader_cache.get(info.file)
if(reader==null) {
reader = LogReader(info.file, info.position)
reader_cache.put(info.file, reader)
}
reader.retain()
reader
}
}
try {
Some(func(reader))
} finally {
reader.release
}
}
def read(pos:Long) = {
get_reader(pos)(_.read(pos))
}
def read(pos:Long, length:Int) = {
get_reader(pos)(_.read(pos, length))
}
}

View File

@ -1,505 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.linkedin.util.clock.Timespan
import scala.beans.BeanProperty
import org.apache.activemq.util.ServiceStopper
import org.apache.activemq.leveldb.{LevelDBClient, RecordLog, LevelDBStore}
import java.net.{NetworkInterface, InetAddress}
import org.fusesource.hawtdispatch._
import org.apache.activemq.broker.{LockableServiceSupport, Locker}
import org.apache.activemq.store.PersistenceAdapter
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicBoolean
import org.apache.activemq.leveldb.util.Log
import java.io.File
import org.apache.activemq.usage.SystemUsage
import org.apache.activemq.ActiveMQMessageAuditNoSync
import org.apache.activemq.broker.jmx.{OpenTypeSupport, BrokerMBeanSupport, AnnotatedMBean}
import javax.management.ObjectName
import javax.management.openmbean.{CompositeDataSupport, SimpleType, CompositeData}
import java.util
import org.apache.activemq.leveldb.replicated.groups._
object ElectingLevelDBStore extends Log {
def machine_hostname: String = {
import collection.JavaConversions._
// Get the host name of the first non-loopback interface..
for (interface <- NetworkInterface.getNetworkInterfaces; if (!interface.isLoopback); inet <- interface.getInetAddresses) {
var address = inet.getHostAddress
var name = inet.getCanonicalHostName
if( address!= name ) {
return name
}
}
// Or else just go the simple route.
return InetAddress.getLocalHost.getCanonicalHostName;
}
}
/**
*
*/
class ElectingLevelDBStore extends ProxyLevelDBStore {
import ElectingLevelDBStore._
def proxy_target = master
@BeanProperty
var zkAddress = "127.0.0.1:2181"
@BeanProperty
var zkPassword:String = _
@BeanProperty
var zkPath = "/default"
@BeanProperty
var zkSessionTimeout = "2s"
var brokerName: String = _
@BeanProperty
var container: String = _
@BeanProperty
var hostname: String = _
@BeanProperty
var connectUrl: String = _
@BeanProperty
var bind = "tcp://0.0.0.0:61619"
@BeanProperty
var weight = 1
@BeanProperty
var replicas = 3
@BeanProperty
var sync="quorum_mem"
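// Majority quorum, e.g. replicas = 3 -> 2 nodes, replicas = 5 -> 3 nodes.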
def clusterSizeQuorum = (replicas/2) + 1
@BeanProperty
var securityToken = ""
var directory = LevelDBStore.DEFAULT_DIRECTORY;
override def setDirectory(dir: File) {
directory = dir
}
override def getDirectory: File = {
return directory
}
@BeanProperty
var logSize: Long = 1024 * 1024 * 100
@BeanProperty
var indexFactory: String = "org.fusesource.leveldbjni.JniDBFactory, org.iq80.leveldb.impl.Iq80DBFactory"
@BeanProperty
var verifyChecksums: Boolean = false
@BeanProperty
var indexMaxOpenFiles: Int = 1000
@BeanProperty
var indexBlockRestartInterval: Int = 16
@BeanProperty
var paranoidChecks: Boolean = false
@BeanProperty
var indexWriteBufferSize: Int = 1024 * 1024 * 6
@BeanProperty
var indexBlockSize: Int = 4 * 1024
@BeanProperty
var indexCompression: String = "snappy"
@BeanProperty
var logCompression: String = "none"
@BeanProperty
var indexCacheSize: Long = 1024 * 1024 * 256L
@BeanProperty
var flushDelay = 0
@BeanProperty
var asyncBufferSize = 1024 * 1024 * 4
@BeanProperty
var monitorStats = false
@BeanProperty
var failoverProducersAuditDepth = ActiveMQMessageAuditNoSync.DEFAULT_WINDOW_SIZE;
@BeanProperty
var maxFailoverProducersToTrack = ActiveMQMessageAuditNoSync.MAXIMUM_PRODUCER_COUNT;
var master: MasterLevelDBStore = _
var slave: SlaveLevelDBStore = _
var zk_client: ZKClient = _
var zk_group: ZooKeeperGroup = _
var position: Long = -1L
override def toString: String = {
return "Replicated LevelDB[%s, %s/%s]".format(directory.getAbsolutePath, zkAddress, zkPath)
}
var usageManager: SystemUsage = _
override def setUsageManager(usageManager: SystemUsage) {
this.usageManager = usageManager
}
def node_id = ReplicatedLevelDBStoreTrait.node_id(directory)
def init() {
if(brokerService!=null && brokerService.isUseJmx){
try {
AnnotatedMBean.registerMBean(brokerService.getManagementContext, new ReplicatedLevelDBStoreView(this), objectName)
} catch {
case e: Throwable => {
warn(e, "PersistenceAdapterReplication could not be registered in JMX: " + e.getMessage)
}
}
}
// Figure out our position in the store.
directory.mkdirs()
val log = new RecordLog(directory, LevelDBClient.LOG_SUFFIX)
log.logSize = logSize
log.open()
position = try {
log.current_appender.append_position
} finally {
log.close
}
zk_client = new ZKClient(zkAddress, Timespan.parse(zkSessionTimeout), null)
if( zkPassword!=null ) {
zk_client.setPassword(zkPassword)
}
zk_client.start
zk_client.waitForConnected(Timespan.parse("30s"))
zk_group = ZooKeeperGroupFactory.create(zk_client, zkPath)
val master_elector = new MasterElector(this)
debug("Starting ZooKeeper group monitor")
master_elector.start(zk_group)
debug("Joining ZooKeeper group")
master_elector.join
this.setUseLock(true)
this.setLocker(createDefaultLocker())
}
def createDefaultLocker(): Locker = new Locker {
def setLockable(lockable: LockableServiceSupport) {}
def configure(persistenceAdapter: PersistenceAdapter) {}
def setFailIfLocked(failIfLocked: Boolean) {}
def setLockAcquireSleepInterval(lockAcquireSleepInterval: Long) {}
def setName(name: String) {}
def start() = {
master_started_latch.await()
}
def keepAlive(): Boolean = {
master_started.get()
}
def stop() {}
}
val master_started_latch = new CountDownLatch(1)
val master_started = new AtomicBoolean(false)
def start_master(func: (Int) => Unit) = {
assert(master==null)
master = create_master()
master_started.set(true)
master.blocking_executor.execute(^{
master.start();
master_stopped.set(false)
master_started_latch.countDown()
})
master.blocking_executor.execute(^{
func(master.getPort)
})
}
def isMaster = master_started.get() && !master_stopped.get()
val stopped_latch = new CountDownLatch(1)
val master_stopped = new AtomicBoolean(false)
def stop_master(func: => Unit) = {
assert(master!=null)
master.blocking_executor.execute(^{
master.stop();
master_stopped.set(true)
position = master.wal_append_position
stopped_latch.countDown()
master = null
func
})
master.blocking_executor.execute(^{
val broker = brokerService
if( broker!=null ) {
try {
broker.requestRestart();
broker.stop();
} catch {
case e:Exception=> warn("Failure occurred while restarting the broker", e);
}
}
})
}
def objectName = {
var objectNameStr = BrokerMBeanSupport.createPersistenceAdapterName(brokerService.getBrokerObjectName.toString, "LevelDB[" + directory.getAbsolutePath + "]").toString
objectNameStr += "," + "view=Replication";
new ObjectName(objectNameStr);
}
protected def doStart() = {
master_started_latch.await()
}
protected def doStop(stopper: ServiceStopper) {
if(brokerService!=null && brokerService.isUseJmx){
brokerService.getManagementContext().unregisterMBean(objectName);
}
if (zk_group != null) {
zk_group.close
zk_group = null
}
if (zk_client != null) {
zk_client.close()
zk_client = null
}
if( master!=null ) {
val latch = new CountDownLatch(1)
stop_master {
latch.countDown()
}
latch.await()
}
if( slave !=null ) {
val latch = new CountDownLatch(1)
stop_slave {
latch.countDown()
}
latch.await()
}
if( master_started.get() ) {
stopped_latch.countDown()
}
}
def start_slave(address: String)(func: => Unit) = {
assert(master==null)
slave = create_slave()
slave.connect = address
slave.blocking_executor.execute(^{
slave.start();
func
})
}
def stop_slave(func: => Unit) = {
if( slave!=null ) {
val s = slave
slave = null
s.blocking_executor.execute(^{
s.stop();
position = s.wal_append_position
func
})
}
}
def create_slave() = {
val slave = new SlaveLevelDBStore();
configure(slave)
slave
}
def create_master() = {
val master = new MasterLevelDBStore
configure(master)
master.replicas = replicas
master.bind = bind
master.syncTo = sync
master
}
override def setBrokerName(brokerName: String): Unit = {
this.brokerName = brokerName
}
override def deleteAllMessages {
if(proxy_target != null) proxy_target.deleteAllMessages
else {
info("You instructed the broker to delete all messages (on startup?). " +
"Cannot delete all messages from an ElectingLevelDBStore because we need to decide who the master is first")
}
}
def configure(store: ReplicatedLevelDBStoreTrait) {
store.directory = directory
store.indexFactory = indexFactory
store.verifyChecksums = verifyChecksums
store.indexMaxOpenFiles = indexMaxOpenFiles
store.indexBlockRestartInterval = indexBlockRestartInterval
store.paranoidChecks = paranoidChecks
store.indexWriteBufferSize = indexWriteBufferSize
store.indexBlockSize = indexBlockSize
store.indexCompression = indexCompression
store.logCompression = logCompression
store.indexCacheSize = indexCacheSize
store.flushDelay = flushDelay
store.asyncBufferSize = asyncBufferSize
store.monitorStats = monitorStats
store.securityToken = securityToken
store.setFailoverProducersAuditDepth(failoverProducersAuditDepth)
store.setMaxFailoverProducersToTrack(maxFailoverProducersToTrack)
store.setBrokerName(brokerName)
store.setBrokerService(brokerService)
store.setUsageManager(usageManager)
}
def address(port: Int) = {
if( connectUrl==null ) {
if (hostname == null) {
hostname = machine_hostname
}
"tcp://" + hostname + ":" + port
} else {
connectUrl;
}
}
override def size: Long = {
if( master !=null ) {
master.size
} else if( slave !=null ) {
slave.size
} else {
var rc = 0L
if( directory.exists() ) {
for( f <- directory.list() ) {
if( f.endsWith(LevelDBClient.LOG_SUFFIX)) {
rc += f.length
}
}
}
rc
}
}
}
class ReplicatedLevelDBStoreView(val store:ElectingLevelDBStore) extends ReplicatedLevelDBStoreViewMBean {
import store._
def getZkAddress = zkAddress
def getZkPath = zkPath
def getZkSessionTimeout = zkSessionTimeout
def getBind = bind
def getReplicas = replicas
def getNodeRole:String = {
if( slave!=null ) {
return "slave"
}
if( master!=null ) {
return "master"
}
"electing"
}
def getStatus:String = {
if( slave!=null ) {
return slave.status
}
if( master!=null ) {
return master.status
}
""
}
object SlaveStatusOTF extends OpenTypeSupport.AbstractOpenTypeFactory {
protected def getTypeName: String = classOf[SlaveStatus].getName
protected override def init() = {
super.init();
addItem("nodeId", "nodeId", SimpleType.STRING);
addItem("remoteAddress", "remoteAddress", SimpleType.STRING);
addItem("attached", "attached", SimpleType.BOOLEAN);
addItem("position", "position", SimpleType.LONG);
}
override def getFields(o: Any): util.Map[String, AnyRef] = {
val status = o.asInstanceOf[SlaveStatus]
val rc = super.getFields(o);
rc.put("nodeId", status.nodeId);
rc.put("remoteAddress", status.remoteAddress);
rc.put("attached", status.attached.asInstanceOf[java.lang.Boolean]);
rc.put("position", status.position.asInstanceOf[java.lang.Long]);
rc
}
}
def getSlaves():Array[CompositeData] = {
if( master!=null ) {
master.slaves_status.map { status =>
val fields = SlaveStatusOTF.getFields(status);
new CompositeDataSupport(SlaveStatusOTF.getCompositeType(), fields).asInstanceOf[CompositeData]
}.toArray
} else {
Array()
}
}
def getPosition:java.lang.Long = {
if( slave!=null ) {
return new java.lang.Long(slave.wal_append_position)
}
if( master!=null ) {
return new java.lang.Long(master.wal_append_position)
}
null
}
def getPositionDate:java.lang.Long = {
val rc = if( slave!=null ) {
slave.wal_date
} else if( master!=null ) {
master.wal_date
} else {
0
}
if( rc != 0 ) {
return new java.lang.Long(rc)
} else {
return null
}
}
def getDirectory = directory.getCanonicalPath
def getSync = sync
def getNodeId: String = node_id
}

View File

@ -1,247 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.apache.activemq.leveldb.replicated.groups._
import com.fasterxml.jackson.annotation.JsonProperty
import org.apache.activemq.leveldb.util.{Log, JsonCodec}
import java.io.IOException
class LevelDBNodeState extends NodeState {
@JsonProperty
var id: String = _
@JsonProperty
var container: String = _
@JsonProperty
var address: String = _
@JsonProperty
var position: Long = -1
@JsonProperty
var weight: Int = 0
@JsonProperty
var elected: String = _
override def equals(obj: Any): Boolean = {
obj match {
case x:LevelDBNodeState =>
x.id == id &&
x.container == container &&
x.address == address &&
x.position == position &&
x.elected == elected
case _ => false
}
}
override
def toString = JsonCodec.encode(this).ascii().toString
}
object MasterElector extends Log
/**
*/
class MasterElector(store: ElectingLevelDBStore) extends ClusteredSingleton[LevelDBNodeState](classOf[LevelDBNodeState]) {
import MasterElector._
var last_state: LevelDBNodeState = _
var elected: String = _
var position: Long = -1
var address: String = _
var updating_store = false
var next_connect: String = _
var connected_address: String = _
def join: Unit = this.synchronized {
last_state = create_state
join(last_state)
add(change_listener)
}
def elector = this
def update: Unit = elector.synchronized {
var next = create_state
if (next != last_state) {
last_state = next
join(next)
}
}
def create_state = {
val rc = new LevelDBNodeState
rc.id = store.brokerName
rc.elected = elected
rc.position = position
rc.weight = store.weight
rc.address = address
rc.container = store.container
rc.address = address
rc
}
object change_listener extends ChangeListener {
def connected = changed
def disconnected = {
changed
}
var stopped = false;
def changed:Unit = elector.synchronized {
debug("ZooKeeper group changed: %s", members)
// info(eid+" cluster state changed: "+members)
if (isMaster) {
// We are the master elector; we will choose which node will start up the MasterLevelDBStore
members.get(store.brokerName) match {
case None =>
info("Not enough cluster members connected to elect a new master.")
case Some(members) =>
if (members.size > store.replicas) {
warn("Too many cluster members are connected. Expected at most "+store.replicas+
" members but there are "+members.size+" connected.")
}
if (members.size < store.clusterSizeQuorum) {
info("Not enough cluster members connected to elect a master.")
elected = null
} else {
// If we already elected a master, let's make sure it is still online..
if (elected != null) {
val by_eid = Map(members: _*)
if (by_eid.get(elected).isEmpty) {
info("Previously elected master is not online, staring new election")
elected = null
}
}
// Do we need to elect a new master?
if (elected == null) {
// Find the member with the most updates.
val sortedMembers = members.filter(_._2.position >= 0).sortWith {
(a, b) => {
a._2.position > b._2.position ||
(a._2.position == b._2.position && a._2.weight > b._2.weight )
}
}
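// e.g. with {A: position=10, weight=1} and {B: position=10, weight=2}, the
// tie on position is broken by weight, so B sorts first and gets elected below.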
if (sortedMembers.size != members.size) {
info("Not enough cluster members have reported their update positions yet.")
} else {
// We now have an election.
elected = sortedMembers.head._1
}
}
// Sort by the positions in the cluster..
}
}
} else {
// Only the master sets the elected field.
elected = null
}
val master_elected = if(eid==null) null else master.map(_.elected).getOrElse(null)
// If no master is currently elected, we need to report our current store position,
// since that will be used to select the master.
val connect_target = if (master_elected != null) {
position = -1
members.get(store.brokerName).get.find(_._1 == master_elected).map(_._2.address).getOrElse(null)
} else {
// Once we are running neither a master nor a slave, report the position..
if( connected_address==null && address==null && !updating_store ) {
position = store.position
}
null
}
// Do we need to stop the running master?
if ((eid==null || master_elected != eid) && address!=null && !updating_store) {
info("Demoted to slave")
updating_store = true
store.stop_master {
elector.synchronized {
updating_store = false
info("Master stopped")
address = null
changed
}
}
}
// Have we been promoted to being the master?
if (eid!=null && master_elected == eid && address==null && !updating_store ) {
info("Promoted to master")
updating_store = true
store.start_master { port =>
elector.synchronized {
updating_store = false
address = store.address(port)
info("Master started: "+address)
changed
}
}
}
// Can we become a slave?
if ( (eid==null || master_elected != eid) && address == null) {
// Did the master address change?
if (connect_target != connected_address) {
// Do we need to setup a new slave.
if (connect_target != null && !updating_store) {
updating_store = true
store.start_slave(connect_target) {
elector.synchronized {
updating_store=false
info("Slave started")
connected_address = connect_target
changed
}
}
}
// Lets stop the slave..
if (connect_target == null && !updating_store) {
updating_store = true
store.stop_slave {
elector.synchronized {
updating_store=false
info("Slave stopped")
connected_address = null
changed
}
}
}
}
}
if( group.zk.isConnected ) {
update
}
}
}
}

View File

@ -1,162 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.apache.activemq.leveldb.util._
import FileSupport._
import java.io._
import org.apache.activemq.leveldb.{RecordLog, LevelDBClient}
import java.util
import org.apache.activemq.leveldb.replicated.dto.{SyncResponse, FileInfo}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object MasterLevelDBClient extends Log {
val MANIFEST_SUFFIX = ".mf"
val LOG_SUFFIX = LevelDBClient.LOG_SUFFIX
val INDEX_SUFFIX = LevelDBClient.INDEX_SUFFIX
}
/**
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class MasterLevelDBClient(val store:MasterLevelDBStore) extends LevelDBClient(store) {
import MasterLevelDBClient._
import collection.JavaConversions._
var snapshots_pending_delete = new util.TreeSet[Long]()
def slave_held_snapshots = {
val rc = new util.HashSet[Long]()
for( v <- store.slaves.values() ; s <- v.held_snapshot ) {
rc.add(s)
}
rc
}
override def replaceLatestSnapshotDirectory(newSnapshotIndexPos: Long) {
if( slave_held_snapshots.contains(lastIndexSnapshotPos) ) {
// if a slave is holding open a snapshot.. let's not delete its data just yet...
snapshots_pending_delete.add(newSnapshotIndexPos)
lastIndexSnapshotPos = newSnapshotIndexPos
} else {
super.replaceLatestSnapshotDirectory(newSnapshotIndexPos)
}
}
override def gc(topicPositions: Seq[(Long, Long)]) {
val snapshots_to_rm = new util.HashSet(snapshots_pending_delete)
snapshots_to_rm.removeAll(slave_held_snapshots);
for ( snapshot <- snapshots_to_rm ) {
snapshotIndexFile(snapshot).recursiveDelete
}
super.gc(topicPositions)
}
override def oldest_retained_snapshot: Long = {
if ( snapshots_pending_delete.isEmpty ) {
super.oldest_retained_snapshot
} else {
snapshots_pending_delete.first()
}
}
def snapshot_state(snapshot_id:Long) = {
def info(file:File) = {
val rc = new FileInfo
rc.file = file.getName
rc.length = file.length()
rc
}
val rc = new SyncResponse
rc.snapshot_position = snapshot_id
rc.wal_append_position = log.current_appender.append_position
for( file <- logDirectory.listFiles; if file.getName.endsWith(LOG_SUFFIX) ) {
// Only need to sync up to what's been flushed.
val fileInfo = info(file)
if( log.current_appender.file == file ) {
rc.append_log = file.getName
fileInfo.length = log.current_appender.flushed_offset.get()
fileInfo.crc32 = file.crc32(fileInfo.length)
} else {
fileInfo.crc32 = file.cached_crc32
}
rc.log_files.add(fileInfo)
}
val index_dir = LevelDBClient.create_sequence_file(directory, snapshot_id, INDEX_SUFFIX)
if( index_dir.exists() ) {
for( file <- index_dir.listFiles ) {
val name = file.getName
if( name !="LOCK" ) {
rc.index_files.add(info(file))
}
}
}
rc
}
// Override the log appender implementation so that it
// stores the logs on the local and remote file systems.
override def createLog = new RecordLog(directory, LOG_SUFFIX) {
override def create_log_appender(position: Long, offset:Long) = {
new LogAppender(next_log(position), position, offset) {
val file_name = file.getName
override def flush = this.synchronized {
val offset = flushed_offset.get()
super.flush
val length = flushed_offset.get() - offset;
store.replicate_wal(file, position, offset, length)
}
override def force = {
import MasterLevelDBStore._
if( (store.syncToMask & SYNC_TO_DISK) != 0) {
super.force
}
if( (store.syncToMask & SYNC_TO_REMOTE) != 0) {
flush
store.wal_sync_to(position+flushed_offset.get())
}
}
override def on_close {
super.force
}
}
}
override protected def onDelete(file: Long) = {
super.onDelete(file)
store.replicate_log_delete(file)
}
}
}

View File

@ -1,468 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.apache.activemq.leveldb.LevelDBStore
import org.apache.activemq.util.ServiceStopper
import org.apache.activemq.leveldb.util.FileSupport._
import org.apache.activemq.leveldb.util.{JsonCodec, Log}
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.replicated.dto._
import org.fusesource.hawtdispatch.transport._
import java.util.concurrent._
import java.io.{IOException, File}
import java.net.{SocketAddress, InetSocketAddress, URI}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.beans.BeanProperty
import org.fusesource.hawtbuf.{Buffer, AsciiBuffer}
class PositionSync(val position:Long, count:Int) extends CountDownLatch(count)
object MasterLevelDBStore extends Log {
val SYNC_TO_DISK = 0x01
val SYNC_TO_REMOTE = 0x02
val SYNC_TO_REMOTE_MEMORY = 0x04 | SYNC_TO_REMOTE
val SYNC_TO_REMOTE_DISK = 0x08 | SYNC_TO_REMOTE
}
case class SlaveStatus(nodeId:String, remoteAddress:String, attached:Boolean, position:Long)
/**
*/
class MasterLevelDBStore extends LevelDBStore with ReplicatedLevelDBStoreTrait {
import MasterLevelDBStore._
import collection.JavaConversions._
import ReplicationSupport._
@BeanProperty
var bind = "tcp://0.0.0.0:61619"
@BeanProperty
var replicas = 3
def minSlaveAcks = replicas/2
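// Integer division gives the majority quorum minus the master itself: with
// the default replicas=3 this is 1, i.e. master + one synced slave = 2 of 3.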
var _syncTo="quorum_mem"
var syncToMask=SYNC_TO_REMOTE_MEMORY
@BeanProperty
def syncTo = _syncTo
@BeanProperty
def syncTo_=(value:String) {
_syncTo = value
syncToMask = 0
for( v <- value.split(",").map(_.trim.toLowerCase) ) {
v match {
case "" =>
case "local_mem" =>
case "local_disk" => syncToMask |= SYNC_TO_DISK
case "remote_mem" => syncToMask |= SYNC_TO_REMOTE_MEMORY
case "remote_disk" => syncToMask |= SYNC_TO_REMOTE_DISK
case "quorum_mem" => syncToMask |= SYNC_TO_REMOTE_MEMORY
case "quorum_disk" => syncToMask |= SYNC_TO_REMOTE_DISK | SYNC_TO_DISK
case x => warn("Unknown syncTo value: [%s]", x)
}
}
}
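// Resulting masks, roughly: "quorum_mem" sets SYNC_TO_REMOTE_MEMORY (ack once
// a quorum of slaves holds the update in memory); "quorum_disk" sets
// SYNC_TO_REMOTE_DISK | SYNC_TO_DISK, so slaves sync to disk and the master
// fsyncs locally as well.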
val slaves = new ConcurrentHashMap[String,SlaveState]()
def slaves_status = slaves.values().map(_.status)
def status = {
var caughtUpCounter = 0
var notCaughtUpCounter = 0
for( slave <- slaves.values() ) {
if( slave.isCaughtUp ) {
caughtUpCounter += 1
} else {
notCaughtUpCounter += 1
}
}
var rc = ""
if( notCaughtUpCounter > 0 ) {
rc += "%d slave nodes attaching. ".format(notCaughtUpCounter)
}
if( caughtUpCounter > 0 ) {
rc += "%d slave nodes attached. ".format(caughtUpCounter)
}
rc
}
override def doStart = {
unstash(directory)
super.doStart
start_protocol_server
// Don't complete startup until enough slaves (minSlaveAcks) have synced up.
wal_sync_to(wal_append_position)
}
override def doStop(stopper: ServiceStopper): Unit = {
if( transport_server!=null ) {
stop_protocol_server
transport_server = null
}
super.doStop(stopper)
}
override def createClient = new MasterLevelDBClient(this)
def master_client = client.asInstanceOf[MasterLevelDBClient]
//////////////////////////////////////
// Replication Protocol Stuff
//////////////////////////////////////
var transport_server:TransportServer = _
val start_latch = new CountDownLatch(1)
def start_protocol_server = {
transport_server = new TcpTransportServer(new URI(bind))
transport_server.setBlockingExecutor(blocking_executor)
transport_server.setDispatchQueue(createQueue("master: "+node_id))
transport_server.setTransportServerListener(new TransportServerListener(){
def onAccept(transport: Transport) {
transport.setDispatchQueue(createQueue("connection from "+transport.getRemoteAddress))
transport.setBlockingExecutor(blocking_executor)
new Session(transport).start
}
def onAcceptError(error: Exception) {
warn(error)
}
})
transport_server.start(^{
start_latch.countDown()
})
start_latch.await()
}
def getPort = {
start_latch.await()
transport_server.getSocketAddress.asInstanceOf[InetSocketAddress].getPort
}
def stop_protocol_server = {
transport_server.stop(NOOP)
}
class Session(transport: Transport) extends TransportHandler(transport) {
var login:Login = _
var slave_state:SlaveState = _
var disconnected = false
def queue = transport.getDispatchQueue
override def onTransportFailure(error: IOException) {
if( !disconnected ) {
warn("Unexpected session error: "+error)
}
super.onTransportFailure(error)
}
def onTransportCommand(command: Any) = {
command match {
case command:ReplicationFrame =>
command.action match {
case LOGIN_ACTION =>
handle_login(JsonCodec.decode(command.body, classOf[Login]))
case SYNC_ACTION =>
handle_sync()
case GET_ACTION =>
handle_get(JsonCodec.decode(command.body, classOf[Transfer]))
case ACK_ACTION =>
handle_ack(JsonCodec.decode(command.body, classOf[WalAck]))
case DISCONNECT_ACTION =>
handle_disconnect()
case _ =>
sendError("Unknown frame action: "+command.action)
}
}
}
def handle_login(request:Login):Unit = {
if( request.security_token != securityToken ) {
sendError("Invalid security_token");
} else {
login = request;
sendOk(null)
}
}
override def onTransportDisconnected() {
val slave_state = this.slave_state;
if( slave_state !=null ) {
this.slave_state=null
if( slave_state.stop(this) && isStarted ) {
slaves.remove(slave_state.slave_id, slave_state)
}
}
}
def handle_disconnect():Unit = {
disconnected = true;
sendOk(null)
}
def handle_sync():Unit = {
if( login == null ) {
sendError("Not logged in")
return;
}
debug("handle_sync")
slave_state = slaves.get(login.node_id)
if ( slave_state == null ) {
slave_state = new SlaveState(login.node_id)
slaves.put(login.node_id, slave_state)
}
slave_state.start(Session.this)
}
def handle_ack(req:WalAck):Unit = {
if( login == null || slave_state == null) {
return;
}
trace("%s: Got WAL ack, position: %d, from: %s", directory, req.position, slave_state.slave_id)
slave_state.position_update(req.position)
}
def handle_get(req:Transfer):Unit = {
if( login == null ) {
sendError("Not logged in")
return;
}
val file = if( req.file.startsWith("log/" ) ) {
client.logDirectory / req.file.stripPrefix("log/")
} else {
client.directory / req.file
}
if( !file.exists() ) {
sendError("file does not exist")
return
}
val length = file.length()
if( req.offset > length ) {
sendError("Invalid offset")
return
}
if( req.offset+req.length > length ) {
sendError("Invalid length")
return
}
sendOk(null)
send(new FileTransferFrame(file, req.offset, req.length))
}
}
class SlaveState(val slave_id:String) {
var held_snapshot:Option[Long] = None
var session:Session = _
var position = new AtomicLong(0)
var caughtUp = new AtomicBoolean(false)
var socketAddress:SocketAddress = _
def start(session:Session) = {
debug("SlaveState:start")
socketAddress = session.transport.getRemoteAddress
session.queue.setLabel(transport_server.getDispatchQueue.getLabel+" -> "+slave_id)
val resp = this.synchronized {
if( this.session!=null ) {
this.session.transport.stop(NOOP)
}
this.session = session
val snapshot_id = client.lastIndexSnapshotPos
held_snapshot = Option(snapshot_id)
position.set(0)
master_client.snapshot_state(snapshot_id)
}
info("Slave has connected: "+slave_id)
session.queue {
session.sendOk(resp)
}
}
def stop(session:Session) = {
this.synchronized {
if( this.session == session ) {
info("Slave has disconnected: "+slave_id)
true
} else {
false
}
}
}
def queue(func: (Session)=>Unit) = {
val h = this.synchronized {
session
}
if( h !=null ) {
h.queue {
func(session)
}
}
}
def replicate(value:LogDelete):Unit = {
val frame = new ReplicationFrame(LOG_DELETE_ACTION, JsonCodec.encode(value))
queue { session =>
session.send(frame)
}
}
var unflushed_replication_frame:DeferredReplicationFrame = null
class DeferredReplicationFrame(file:File, val position:Long, _offset:Long, initialLength:Long) extends ReplicationFrame(WAL_ACTION, null) {
val fileTransferFrame = new FileTransferFrame(file, _offset, initialLength)
var encoded:Buffer = null
def offset = fileTransferFrame.offset
def length = fileTransferFrame.length
override def body: Buffer = {
if( encoded==null ) {
val value = new LogWrite
value.file = position;
value.offset = offset;
value.sync = (syncToMask & SYNC_TO_REMOTE_DISK)!=0
value.length = fileTransferFrame.length
value.date = date
encoded = JsonCodec.encode(value)
}
encoded
}
}
def replicate(file:File, position:Long, offset:Long, length:Long):Unit = {
queue { session =>
// Check to see if we can merge the replication event w/ the previous event..
if( unflushed_replication_frame == null ||
unflushed_replication_frame.position!=position ||
(unflushed_replication_frame.offset+unflushed_replication_frame.length)!=offset ) {
// We could not merge the replication event w/ the previous event..
val frame = new DeferredReplicationFrame(file, position, offset, length)
unflushed_replication_frame = frame
session.send(frame, ()=>{
trace("%s: Sent WAL update: (file:%s, offset: %d, length: %d) to %s", directory, file, frame.offset, frame.length, slave_id)
if( unflushed_replication_frame eq frame ) {
unflushed_replication_frame = null
}
})
session.send(frame.fileTransferFrame)
} else {
// We were able to merge.. yay!
assert(unflushed_replication_frame.encoded == null)
unflushed_replication_frame.fileTransferFrame.length += length
}
}
}
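// e.g. two contiguous writes to the same log (100 bytes at offset 0, then 100
// at offset 100) collapse into one WAL frame of offset 0, length 200, as long
// as the first frame's body has not been encoded yet.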
def position_update(position:Long) = {
this.position.getAndSet(position)
check_position_sync
}
@volatile
var last_position_sync:PositionSync = null
def check_position_sync = {
val p = position_sync
if( last_position_sync!=p ) {
if( position.get >= p.position ) {
if( caughtUp.compareAndSet(false, true) ) {
info("Slave has now caught up: "+slave_id)
this.synchronized {
this.held_snapshot = None
}
}
p.countDown
last_position_sync = p
}
}
}
def isCaughtUp = caughtUp.get()
def status = SlaveStatus(slave_id, socketAddress.toString, isCaughtUp, position.get())
}
@volatile
var position_sync = new PositionSync(0L, 0)
def wal_sync_to(position:Long):Unit = {
if( minSlaveAcks<1 || (syncToMask & SYNC_TO_REMOTE)==0) {
return
}
if( isStoppedOrStopping ) {
throw new IllegalStateException("Store replication stopped")
}
val position_sync = new PositionSync(position, minSlaveAcks)
this.position_sync = position_sync
for( slave <- slaves.values() ) {
slave.check_position_sync
}
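// Each slave counts the latch down once its acked WAL position reaches
// `position`; the loop below blocks the writer until minSlaveAcks slaves
// have caught up (or the store is stopped).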
while( !position_sync.await(1, TimeUnit.SECONDS) ) {
if( isStoppedOrStopping ) {
throw new IllegalStateException("Store replication stopped")
}
warn("Store update waiting on %d replica(s) to catch up to log position %d. %s", minSlaveAcks, position, status)
}
}
def isStoppedOrStopping: Boolean = {
if( isStopped || isStopping )
return true
if( broker_service!=null && broker_service.isStopping )
return true
false
}
def date = System.currentTimeMillis()
def replicate_wal(file:File, position:Long, offset:Long, length:Long):Unit = {
if( length > 0 ) {
for( slave <- slaves.values() ) {
slave.replicate(file, position, offset, length)
}
}
}
def replicate_log_delete(log:Long):Unit = {
val value = new LogDelete
value.log = log
for( slave <- slaves.values() ) {
slave.replicate(value)
}
}
def wal_append_position = client.wal_append_position
@volatile
var wal_date = 0L
}

View File

@ -1,137 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.apache.activemq.broker.{LockableServiceSupport, BrokerService, BrokerServiceAware, ConnectionContext}
import org.apache.activemq.command._
import org.apache.activemq.leveldb.LevelDBStore
import org.apache.activemq.store._
import org.apache.activemq.usage.SystemUsage
import java.io.File
import java.io.IOException
import java.util.Set
import org.apache.activemq.util.{ServiceStopper, ServiceSupport}
import org.apache.activemq.broker.scheduler.JobSchedulerStore
/**
*/
abstract class ProxyLevelDBStore extends LockableServiceSupport with BrokerServiceAware with PersistenceAdapter with TransactionStore with PListStore {
def proxy_target: LevelDBStore
def beginTransaction(context: ConnectionContext) {
proxy_target.beginTransaction(context)
}
def getLastProducerSequenceId(id: ProducerId): Long = {
return proxy_target.getLastProducerSequenceId(id)
}
def createTopicMessageStore(destination: ActiveMQTopic): TopicMessageStore = {
return proxy_target.createTopicMessageStore(destination)
}
def createJobSchedulerStore():JobSchedulerStore = {
throw new UnsupportedOperationException();
}
def setDirectory(dir: File) {
proxy_target.setDirectory(dir)
}
def checkpoint(sync: Boolean) {
proxy_target.checkpoint(sync)
}
def createTransactionStore: TransactionStore = {
return proxy_target.createTransactionStore
}
def setUsageManager(usageManager: SystemUsage) {
proxy_target.setUsageManager(usageManager)
}
def commitTransaction(context: ConnectionContext) {
proxy_target.commitTransaction(context)
}
def getLastMessageBrokerSequenceId: Long = {
return proxy_target.getLastMessageBrokerSequenceId
}
def setBrokerName(brokerName: String) {
proxy_target.setBrokerName(brokerName)
}
def rollbackTransaction(context: ConnectionContext) {
proxy_target.rollbackTransaction(context)
}
def removeTopicMessageStore(destination: ActiveMQTopic) {
proxy_target.removeTopicMessageStore(destination)
}
def getDirectory: File = {
return proxy_target.getDirectory
}
def size: Long = {
return proxy_target.size
}
def removeQueueMessageStore(destination: ActiveMQQueue) {
proxy_target.removeQueueMessageStore(destination)
}
def createQueueMessageStore(destination: ActiveMQQueue): MessageStore = {
return proxy_target.createQueueMessageStore(destination)
}
def deleteAllMessages {
proxy_target.deleteAllMessages
}
def getDestinations: Set[ActiveMQDestination] = {
return proxy_target.getDestinations
}
def rollback(txid: TransactionId) {
proxy_target.rollback(txid)
}
def recover(listener: TransactionRecoveryListener) {
proxy_target.recover(listener)
}
def prepare(txid: TransactionId) {
proxy_target.prepare(txid)
}
def commit(txid: TransactionId, wasPrepared: Boolean, preCommit: Runnable, postCommit: Runnable) {
proxy_target.commit(txid, wasPrepared, preCommit, postCommit)
}
def getPList(name: String): PList = {
return proxy_target.getPList(name)
}
def removePList(name: String): Boolean = {
return proxy_target.removePList(name)
}
def allowIOResumption() = {}
}

View File

@ -1,66 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import scala.beans.BeanProperty
import java.util.UUID
import org.apache.activemq.leveldb.LevelDBStore
import org.apache.activemq.leveldb.util.FileSupport._
import java.io.File
object ReplicatedLevelDBStoreTrait {
def create_uuid = UUID.randomUUID().toString
def node_id(directory:File):String = {
val nodeid_file = directory / "nodeid.txt"
if( nodeid_file.exists() ) {
nodeid_file.readText()
} else {
val rc = create_uuid
nodeid_file.getParentFile.mkdirs()
nodeid_file.writeText(rc)
rc
}
}
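// e.g. the first start of a store writes a fresh UUID to
// <directory>/nodeid.txt; later starts read it back, so the node keeps a
// stable identity across restarts.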
}
/**
*/
trait ReplicatedLevelDBStoreTrait extends LevelDBStore {
@BeanProperty
var securityToken = ""
def node_id = ReplicatedLevelDBStoreTrait.node_id(directory)
def storeId:String = {
val storeid_file = directory / "storeid.txt"
if( storeid_file.exists() ) {
storeid_file.readText()
} else {
null
}
}
def storeId_=(value:String) {
val storeid_file = directory / "storeid.txt"
storeid_file.writeText(value)
}
}

View File

@ -1,109 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.fusesource.hawtbuf.{Buffer, AsciiBuffer}
import org.fusesource.hawtdispatch.transport.AbstractProtocolCodec
import org.fusesource.hawtdispatch.transport.AbstractProtocolCodec.Action
import java.nio.{MappedByteBuffer, ByteBuffer}
import org.fusesource.hawtdispatch.Task
import java.io.{OutputStream, File}
import org.fusesource.hawtdispatch.transport.ProtocolCodec.BufferState
import java.util
class ReplicationFrame(val action:AsciiBuffer, _body:Buffer) {
def body = _body
}
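// Wire format implemented by the codec below: an ASCII action line terminated
// by '\n', then an optional body terminated by a 0 byte; e.g. a bodyless sync
// request is just the bytes "sync\n\0". FileTransferFrame payloads follow as
// raw bytes and are consumed separately via readData().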
class FileTransferFrame(val file:File, val offset:Long, var length:Long)
class ReplicationProtocolCodec extends AbstractProtocolCodec {
import ReplicationSupport._
val transfers = new util.LinkedList[MappedByteBuffer]();
def encode(value: Any) {
value match {
case value:ReplicationFrame =>
value.action.writeTo(nextWriteBuffer.asInstanceOf[OutputStream])
nextWriteBuffer.write('\n');
if( value.body!=null ) {
value.body.writeTo(nextWriteBuffer.asInstanceOf[OutputStream])
}
nextWriteBuffer.write(0);
case value:FileTransferFrame =>
if( value.length > 0 ) {
val buffer = map(value.file, value.offset, value.length, true)
writeDirect(buffer);
if( buffer.hasRemaining ) {
transfers.addLast(buffer)
} else {
unmap(buffer)
}
}
case value:Buffer =>
value.writeTo(nextWriteBuffer.asInstanceOf[OutputStream])
}
}
override def flush(): BufferState = {
val rc = super.flush()
while( !transfers.isEmpty && !transfers.peekFirst().hasRemaining) {
unmap(transfers.removeFirst())
}
rc
}
def initialDecodeAction() = readHeader
val readHeader = new Action() {
def apply = {
val action_line:Buffer = readUntil('\n'.toByte, 80)
if( action_line!=null ) {
action_line.moveTail(-1);
nextDecodeAction = readReplicationFrame(action_line.ascii())
nextDecodeAction.apply()
} else {
null
}
}
}
def readReplicationFrame(action:AsciiBuffer):Action = new Action() {
def apply = {
val data:Buffer = readUntil(0.toByte, 1024*64)
if( data!=null ) {
data.moveTail(-1);
nextDecodeAction = readHeader
new ReplicationFrame(action, data)
} else {
null
}
}
}
def readData(data_target:ByteBuffer, cb:Task) = {
nextDecodeAction = new Action() {
def apply = {
if( readDirect(data_target) ) {
nextDecodeAction = readHeader
cb.run()
}
null
}
}
}
}

View File

@ -1,126 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.fusesource.hawtbuf.Buffer._
import java.util.concurrent._
import java.nio.MappedByteBuffer
import sun.nio.ch.DirectBuffer
import java.io.{RandomAccessFile, File}
import java.nio.channels.FileChannel
import java.util.concurrent.atomic.AtomicInteger
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.util.FileSupport._
import org.apache.activemq.leveldb.LevelDBClient
import scala.collection.immutable.TreeMap
object ReplicationSupport {
val WAL_ACTION = ascii("wal")
val LOGIN_ACTION= ascii("LevelDB Store Replication v1:login")
val SYNC_ACTION = ascii("sync")
val GET_ACTION = ascii("get")
val ACK_ACTION = ascii("ack")
val OK_ACTION = ascii("ok")
val DISCONNECT_ACTION = ascii("disconnect")
val ERROR_ACTION = ascii("error")
val LOG_DELETE_ACTION = ascii("rm")
def unmap(buffer:MappedByteBuffer ) {
try {
buffer.asInstanceOf[DirectBuffer].cleaner().clean();
} catch {
case ignore:Throwable =>
}
}
def map(file:File, offset:Long, length:Long, readOnly:Boolean) = {
val raf = new RandomAccessFile(file, if(readOnly) "r" else "rw");
try {
val mode = if (readOnly) FileChannel.MapMode.READ_ONLY else FileChannel.MapMode.READ_WRITE
raf.getChannel().map(mode, offset, length);
} finally {
raf.close();
}
}
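// Closing the RandomAccessFile above is safe: the MappedByteBuffer returned
// by FileChannel.map stays valid after the channel is closed, until it is
// explicitly unmapped.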
def stash(directory:File) {
directory.mkdirs()
val tmp_stash = directory / "stash.tmp"
val stash = directory / "stash"
stash.recursiveDelete
tmp_stash.recursiveDelete
tmp_stash.mkdirs()
copy_store_dir(directory, tmp_stash)
tmp_stash.renameTo(stash)
}
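// Lifecycle sketch (as used by the slave store): stash before syncing from a
// new master, unstash to restore the old state if the sync never completes,
// and stash_clear once fully caught up.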
def copy_store_dir(from:File, to:File) = {
val log_files = LevelDBClient.find_sequence_files(from, LevelDBClient.LOG_SUFFIX)
if( !log_files.isEmpty ) {
val append_file = log_files.last._2
for( file <- log_files.values ; if file != append_file) {
file.linkTo(to / file.getName)
val crc_file = file.getParentFile / (file.getName+".crc32" )
if( crc_file.exists() ) {
crc_file.linkTo(to / crc_file.getName)
}
}
append_file.copyTo(to / append_file.getName)
}
val index_dirs = LevelDBClient.find_sequence_files(from, LevelDBClient.INDEX_SUFFIX)
if( !index_dirs.isEmpty ) {
val index_file = index_dirs.last._2
var target = to / index_file.getName
target.mkdirs()
LevelDBClient.copyIndex(index_file, target)
}
}
def stash_clear(directory:File) {
val stash = directory / "stash"
stash.recursiveDelete
}
def unstash(directory:File) {
val tmp_stash = directory / "stash.tmp"
tmp_stash.recursiveDelete
val stash = directory / "stash"
if( stash.exists() ) {
delete_store(directory)
copy_store_dir(stash, directory)
stash.recursiveDelete
}
}
def delete_store(directory: File) {
// Delete any existing files to make space for the stash we will be restoring..
var t: TreeMap[Long, File] = LevelDBClient.find_sequence_files(directory, LevelDBClient.LOG_SUFFIX)
for (entry <- t) {
val file = entry._2
file.delete()
val crc_file = directory / (file.getName+".crc32" )
if( crc_file.exists() ) {
crc_file.delete()
}
}
for (file <- LevelDBClient.find_sequence_files(directory, LevelDBClient.INDEX_SUFFIX)) {
file._2.recursiveDelete
}
}
}

View File

@ -1,461 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.apache.activemq.leveldb.{LevelDBStoreTest, LevelDBClient, LevelDBStore}
import org.apache.activemq.util.ServiceStopper
import java.util
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.replicated.dto._
import org.fusesource.hawtdispatch.transport._
import java.net.URI
import org.fusesource.hawtbuf.{Buffer, AsciiBuffer}
import org.apache.activemq.leveldb.util._
import FileSupport._
import java.io.{IOException, RandomAccessFile, File}
import scala.beans.BeanProperty
import java.util.concurrent.{CountDownLatch, TimeUnit}
import javax.management.ObjectName
import org.apache.activemq.broker.jmx.AnnotatedMBean
object SlaveLevelDBStore extends Log
/**
*/
class SlaveLevelDBStore extends LevelDBStore with ReplicatedLevelDBStoreTrait {
import SlaveLevelDBStore._
import ReplicationSupport._
import collection.JavaConversions._
@BeanProperty
var connect = "tcp://0.0.0.0:61619"
val queue = createQueue("leveldb replication slave")
var replay_from = 0L
var caughtUp = false
var wal_session:Session = _
var transfer_session:Session = _
var status = "initialized"
override def createClient = new LevelDBClient(this) {
// We don't want to start doing index snapshots until
// the slave is caught up.
override def post_log_rotate: Unit = {
if( caughtUp ) {
writeExecutor {
snapshotIndex(false)
}
}
}
// The snapshots we create are based on what has been replayed.
override def nextIndexSnapshotPos:Long = indexRecoveryPosition
}
override def doStart() = {
queue.setLabel("slave: "+node_id)
client.init()
if (purgeOnStatup) {
purgeOnStatup = false
db.client.locked_purge
info("Purged: "+this)
}
db.client.dirtyIndexFile.recursiveDelete
db.client.plistIndexFile.recursiveDelete
start_slave_connections
if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") ) {
val name = new ObjectName(objectName.toString + ",view=Test")
AnnotatedMBean.registerMBean(brokerService.getManagementContext, new LevelDBStoreTest(this), name)
}
}
var stopped = false
override def doStop(stopper: ServiceStopper) = {
if( java.lang.Boolean.getBoolean("org.apache.activemq.leveldb.test") )
brokerService.getManagementContext().unregisterMBean(new ObjectName(objectName.toString+",view=Test"));
val latch = new CountDownLatch(1)
stop_connections(^{
latch.countDown
})
// Make sure the sessions are stopped before we close the client.
latch.await()
client.stop()
}
def restart_slave_connections = {
stop_connections(^{
client.stop()
client = createClient
client.init()
start_slave_connections
})
}
def start_slave_connections = {
val transport: TcpTransport = create_transport
status = "Attaching to master: "+connect
info(status)
wal_session = new Session(transport, (session)=>{
// let's stash away our current state so that we can unstash it
// in case we don't get caught up.. If the master dies,
// the stashed data might be the best option to become the master.
stash(directory)
delete_store(directory)
debug("Log replication session connected")
session.request_then(SYNC_ACTION, null) { body =>
val response = JsonCodec.decode(body, classOf[SyncResponse])
transfer_missing(response)
session.handler = wal_handler(session)
}
})
wal_session.start
}
def create_transport: TcpTransport = {
val transport = new TcpTransport()
transport.setBlockingExecutor(blocking_executor)
transport.setDispatchQueue(queue)
transport.connecting(new URI(connect), null)
transport
}
def stop_connections(cb:Task) = {
var task = ^{
unstash(directory)
cb.run()
}
val wal_session_copy = wal_session
if( wal_session_copy !=null ) {
wal_session = null
val next = task
task = ^{
wal_session_copy.transport.stop(next)
}
}
val transfer_session_copy = transfer_session
if( transfer_session_copy !=null ) {
transfer_session = null
val next = task
task = ^{
transfer_session_copy.transport.stop(next)
}
}
task.run();
}
var wal_append_position = 0L
var wal_append_offset = 0L
@volatile
var wal_date = 0L
def send_wal_ack = {
queue.assertExecuting()
if( caughtUp && !stopped && wal_session!=null) {
val ack = new WalAck()
ack.position = wal_append_position
// info("Sending ack: "+wal_append_position)
wal_session.send_replication_frame(ACK_ACTION, ack)
if( replay_from != ack.position ) {
val old_replay_from = replay_from
replay_from = ack.position
client.writeExecutor {
client.replay_from(old_replay_from, ack.position, false)
}
}
}
}
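// Note the ack position's double duty above: it reports replication progress
// to the master and advances the local index replay window
// (old_replay_from .. ack.position) on the write executor.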
val pending_log_removes = new util.ArrayList[Long]()
def wal_handler(session:Session): (AnyRef)=>Unit = (command)=>{
command match {
case command:ReplicationFrame =>
command.action match {
case WAL_ACTION =>
val value = JsonCodec.decode(command.body, classOf[LogWrite])
if( caughtUp && value.offset ==0 && value.file!=0 ) {
client.log.rotate
}
trace("%s, Slave WAL update: (file:%s, offset: %d, length: %d)".format(directory, value.file.toHexString, value.offset, value.length))
val file = client.log.next_log(value.file)
val buffer = map(file, value.offset, value.length, false)
def readData = session.codec.readData(buffer, ^{
if( value.sync ) {
buffer.force()
}
unmap(buffer)
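// WAL positions are absolute: value.file is the log's base position, so
// adding the in-file offset yields the global append position below.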
wal_append_offset = value.offset+value.length
wal_append_position = value.file + wal_append_offset
wal_date = value.date
if( !stopped ) {
if( caughtUp ) {
client.log.current_appender.skip(value.length)
}
send_wal_ack
}
})
if( client.log.recordLogTestSupport!=null ) {
client.log.recordLogTestSupport.writeCall.call {
readData
}
} else {
readData
}
case LOG_DELETE_ACTION =>
val value = JsonCodec.decode(command.body, classOf[LogDelete])
if( !caughtUp ) {
pending_log_removes.add(value.log)
} else {
client.log.delete(value.log)
}
case OK_ACTION =>
// This comes in as the response to a disconnect we send.
case _ => session.fail("Unexpected command action: "+command.action)
}
}
}
class Session(transport:Transport, on_login: (Session)=>Unit) extends TransportHandler(transport) {
val response_callbacks = new util.LinkedList[(ReplicationFrame)=>Unit]()
override def onTransportFailure(error: IOException) {
if( isStarted ) {
warn("Unexpected session error: "+error)
queue.after(1, TimeUnit.SECONDS) {
if( isStarted ) {
restart_slave_connections
}
}
}
super.onTransportFailure(error)
}
override def onTransportConnected {
super.onTransportConnected
val login = new Login
login.security_token = securityToken
login.node_id = node_id
request_then(LOGIN_ACTION, login) { body =>
on_login(Session.this)
}
}
def disconnect(cb:Task) = queue {
send_replication_frame(DISCONNECT_ACTION, null)
transport.flush()
transport.stop(cb)
}
def fail(msg:String) = {
error(msg)
transport.stop(NOOP)
}
var handler: (AnyRef)=>Unit = response_handler
def onTransportCommand(command: AnyRef) = handler(command)
def request_then(action:AsciiBuffer, body:AnyRef)(cb:(Buffer)=>Unit) = {
request(action, body){ response =>
response.action match {
case OK_ACTION =>
cb(response.body)
case ERROR_ACTION =>
fail(action+" failed: "+response.body.ascii().toString)
case _ =>
fail("Unexpected response action: "+response.action)
}
}
}
def request(action:AsciiBuffer, body:AnyRef)(cb:(ReplicationFrame)=>Unit) = {
response_callbacks.addLast(cb)
send_replication_frame(action, body)
}
def response_handler: (AnyRef)=>Unit = (command)=> {
command match {
case command:ReplicationFrame =>
if( response_callbacks.isEmpty ) {
error("No response callback registered")
transport.stop(NOOP)
} else {
val callback = response_callbacks.removeFirst()
callback(command)
}
}
}
}
def transfer_missing(state:SyncResponse) = {
val dirty_index = client.dirtyIndexFile
dirty_index.recursiveDelete
val snapshot_index = client.snapshotIndexFile(state.snapshot_position)
val transport = new TcpTransport()
transport.setBlockingExecutor(blocking_executor)
transport.setDispatchQueue(queue)
transport.connecting(new URI(connect), null)
debug("%s: Connecting download session. Snapshot index at: %s".format(directory, state.snapshot_position.toHexString))
transfer_session = new Session(transport, (session)=> {
var total_files = 0
var total_size = 0L
var downloaded_size = 0L
var downloaded_files = 0
def update_download_status = {
status = "Attaching... Downloaded %.2f/%.2f kb and %d/%d files".format(downloaded_size/1024f, total_size/1024f, downloaded_files, total_files)
info(status)
}
debug("Download session connected...")
// Transfer the log files..
var append_offset = 0L
for( x <- state.log_files ) {
if( x.file == state.append_log ) {
append_offset = x.length
}
val stashed_file: File = directory / "stash" / x.file
val target_file: File = directory / x.file
def previously_downloaded:Boolean = {
if( !stashed_file.exists() )
return false
if (stashed_file.length() < x.length )
return false
if (stashed_file.length() == x.length )
return stashed_file.cached_crc32 == x.crc32
if( x.file == state.append_log ) {
return false;
}
return stashed_file.cached_crc32 == x.crc32
}
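// e.g. a stashed log whose length and cached crc32 both match the master's
// FileInfo is reused below (linked, or copied for the live append log)
// instead of being re-downloaded.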
// We don't have to transfer log files that have been previously transferred.
if( previously_downloaded ) {
// let's link it from the stash directory..
info("Slave skipping download of: log/"+x.file)
if( x.file == state.append_log ) {
stashed_file.copyTo(target_file) // let's not link a file that's going to be modified..
} else {
stashed_file.linkTo(target_file)
}
} else {
val transfer = new Transfer()
transfer.file = "log/"+x.file
transfer.offset = 0
transfer.length = x.length
debug("Slave requested: "+transfer.file)
total_size += x.length
total_files += 1
session.request_then(GET_ACTION, transfer) { body =>
val buffer = map(target_file, 0, x.length, false)
session.codec.readData(buffer, ^{
unmap(buffer)
trace("%s, Downloaded %s, offset:%d, length:%d", directory, transfer.file, transfer.offset, transfer.length)
downloaded_size += x.length
downloaded_files += 1
update_download_status
})
}
}
}
// Transfer the index files..
if( !state.index_files.isEmpty ) {
dirty_index.mkdirs()
}
for( x <- state.index_files ) {
val transfer = new Transfer()
transfer.file = snapshot_index.getName+"/"+x.file
transfer.offset = 0
transfer.length = x.length
info("Slave requested: "+transfer.file)
total_size += x.length
total_files += 1
session.request_then(GET_ACTION, transfer) { body =>
val buffer = map(dirty_index / x.file, 0, x.length, false)
session.codec.readData(buffer, ^{
unmap(buffer)
trace("%s, Downloaded %s, offset:%d, length:%d", directory, transfer.file, transfer.offset, transfer.length)
downloaded_size += x.length
downloaded_files += 1
update_download_status
})
}
}
session.request_then(DISCONNECT_ACTION, null) { body =>
// Ok we are now caught up.
status = "Attached"
info(status)
stash_clear(directory) // we don't need the stash anymore.
transport.stop(NOOP)
transfer_session = null
replay_from = state.snapshot_position
if( wal_append_position < state.wal_append_position ) {
wal_append_position = state.wal_append_position
wal_append_offset = append_offset
}
client.writeExecutor {
if( !state.index_files.isEmpty ) {
trace("%s: Index sync complete, copying to snapshot.", directory)
client.copyDirtyIndexToSnapshot(state.wal_append_position)
}
client.replay_init()
}
caughtUp = true
client.log.open(wal_append_offset)
send_wal_ack
for( i <- pending_log_removes ) {
client.log.delete(i);
}
pending_log_removes.clear()
}
})
transfer_session.start
state.snapshot_position
}
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated
import org.fusesource.hawtdispatch.transport.{TransportListener, DefaultTransportListener, Transport}
import java.util
import org.apache.activemq.leveldb.replicated.ReplicationSupport._
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.util.JsonCodec
import java.io.IOException
import org.fusesource.hawtbuf.AsciiBuffer
/**
*/
abstract class TransportHandler(val transport: Transport) extends TransportListener {
var outbound = new util.LinkedList[(AnyRef, ()=>Unit)]()
val codec = new ReplicationProtocolCodec
transport.setProtocolCodec(codec)
transport.setTransportListener(this)
def start = {
transport.start(NOOP)
}
def onTransportConnected = transport.resumeRead()
def onTransportDisconnected() = {}
def onRefill = drain
def onTransportFailure(error: IOException) = transport.stop(NOOP)
def drain:Unit = {
while( !outbound.isEmpty ) {
val (value, on_send) = outbound.peekFirst()
if( transport.offer(value) ) {
outbound.removeFirst()
if( on_send!=null ) {
on_send()
}
} else {
return
}
}
}
def send(value:AnyRef):Unit = send(value, null)
def send(value:AnyRef, on_send: ()=>Unit):Unit = {
transport.getDispatchQueue.assertExecuting()
outbound.add((value, on_send))
drain
}
def send_replication_frame(action:AsciiBuffer, body:AnyRef):Unit = send(new ReplicationFrame(action, if(body==null) null else JsonCodec.encode(body)))
def sendError(error:String) = send_replication_frame(ERROR_ACTION, error)
def sendOk(body:AnyRef) = send_replication_frame(OK_ACTION, body)
}

View File

@ -1,107 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.groups
import org.slf4j.{Logger, LoggerFactory}
import java.util.concurrent.TimeUnit
/**
* <p>
* Callback interface used to get notifications of changes
* to a cluster group.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait ChangeListener {
def changed:Unit
def connected:Unit
def disconnected:Unit
}
object ChangeListenerSupport {
val LOG: Logger = LoggerFactory.getLogger(classOf[ChangeListenerSupport])
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait ChangeListenerSupport {
var listeners = List[ChangeListener]()
def connected:Boolean
def add(listener: ChangeListener): Unit = {
val connected = this.synchronized {
listeners ::= listener
this.connected
}
if (connected) {
listener.connected
}
}
def remove(listener: ChangeListener): Unit = this.synchronized {
listeners = listeners.filterNot(_ == listener)
}
def fireConnected() = {
val listeners = this.synchronized { this.listeners }
check_elapsed_time {
for (listener <- listeners) {
listener.connected
}
}
}
def fireDisconnected() = {
val listeners = this.synchronized { this.listeners }
check_elapsed_time {
for (listener <- listeners) {
listener.disconnected
}
}
}
def fireChanged() = {
val listeners = this.synchronized { this.listeners }
check_elapsed_time {
for (listener <- listeners) {
listener.changed
}
}
}
def check_elapsed_time[T](func: => T):T = {
val start = System.nanoTime()
try {
func
} finally {
val end = System.nanoTime()
val elapsed = TimeUnit.NANOSECONDS.toMillis(end-start)
if( elapsed > 100 ) {
ChangeListenerSupport.LOG.warn("listeners are taking too long to process the events")
}
}
}
}

View File

@ -1,269 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.groups
import collection.mutable.{ListBuffer, HashMap}
import java.io._
import com.fasterxml.jackson.databind.ObjectMapper
import collection.JavaConversions._
import java.util.LinkedHashMap
import java.lang.{IllegalStateException, String}
import beans.BeanProperty
import com.fasterxml.jackson.annotation.JsonProperty
import org.apache.zookeeper.KeeperException.NoNodeException
import scala.reflect.ClassTag
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait NodeState {
/**
* The id of the cluster node. There can be multiple nodes with this ID,
* but only the first node in the cluster will be the master for it.
*/
def id: String
override
def toString = new String(ClusteredSupport.encode(this), "UTF-8")
}
class TextNodeState extends NodeState {
@BeanProperty
@JsonProperty
var id:String = _
}
/**
*
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object ClusteredSupport {
val DEFAULT_MAPPER = new ObjectMapper
def decode[T](t : Class[T], buffer: Array[Byte], mapper: ObjectMapper=DEFAULT_MAPPER): T = decode(t, new ByteArrayInputStream(buffer), mapper)
def decode[T](t : Class[T], in: InputStream, mapper: ObjectMapper): T = mapper.readValue(in, t)
def encode(value: AnyRef, mapper: ObjectMapper=DEFAULT_MAPPER): Array[Byte] = {
var baos: ByteArrayOutputStream = new ByteArrayOutputStream
encode(value, baos, mapper)
return baos.toByteArray
}
def encode(value: AnyRef, out: OutputStream, mapper: ObjectMapper): Unit = {
mapper.writeValue(out, value)
}
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ClusteredSingletonWatcher[T <: NodeState](val stateClass:Class[T]) extends ChangeListenerSupport {
import ClusteredSupport._
protected var _group:ZooKeeperGroup = _
def group = _group
/**
* Override to use a custom configured mapper.
*/
def mapper = ClusteredSupport.DEFAULT_MAPPER
private val listener = new ChangeListener() {
def changed() {
val members = _group.members
val t = new LinkedHashMap[String, T]()
members.foreach {
case (path, data) =>
try {
val value = decode(stateClass, data, mapper)
t.put(path, value)
} catch {
case e: Throwable =>
e.printStackTrace()
}
}
changed_decoded(t)
}
def connected = {
onConnected
changed
ClusteredSingletonWatcher.this.fireConnected
}
def disconnected = {
onDisconnected
changed
ClusteredSingletonWatcher.this.fireDisconnected
}
}
protected def onConnected = {}
protected def onDisconnected = {}
def start(group:ZooKeeperGroup) = this.synchronized {
if(_group !=null )
throw new IllegalStateException("Already started.")
_group = group
_group.add(listener)
}
def stop = this.synchronized {
if(_group==null)
throw new IllegalStateException("Not started.")
_group.remove(listener)
_members = HashMap[String, ListBuffer[(String, T)]]()
_group = null
}
def connected = this.synchronized {
if(_group==null) {
false
} else {
_group.connected
}
}
protected var _members = HashMap[String, ListBuffer[(String, T)]]()
def members = this.synchronized { _members }
def changed_decoded(m: LinkedHashMap[String, T]) = {
this.synchronized {
if( _group!=null ) {
_members = HashMap[String, ListBuffer[(String, T)]]()
m.foreach { case node =>
_members.getOrElseUpdate(node._2.id, ListBuffer[(String, T)]()).append(node)
}
}
}
fireChanged
}
def masters = this.synchronized {
_members.mapValues(_.head._2).toArray.map(_._2).toArray(new ClassTag[T] {
def runtimeClass = stateClass
override def erasure = stateClass
})
}
}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ClusteredSingleton[T <: NodeState ](stateClass:Class[T]) extends ClusteredSingletonWatcher[T](stateClass) {
import ClusteredSupport._
private var _eid:String = _
/** the ephemeral id of the node is unique within the group */
def eid = _eid
private var _state:T = _
override def stop = {
this.synchronized {
if(_state != null) {
leave
}
super.stop
}
}
def join(state:T):Unit = this.synchronized {
if(state==null)
throw new IllegalArgumentException("State cannot be null")
if(state.id==null)
throw new IllegalArgumentException("The state id cannot be null")
if(_group==null)
throw new IllegalStateException("Not started.")
this._state = state
while( connected ) {
if( _eid == null ) {
_eid = group.join(encode(state, mapper))
return;
} else {
try {
_group.update(_eid, encode(state, mapper))
return;
} catch {
case e:NoNodeException =>
this._eid = null;
}
}
}
}
def leave:Unit = this.synchronized {
if(this._state==null)
throw new IllegalStateException("Not joined")
if(_group==null)
throw new IllegalStateException("Not started.")
this._state = null.asInstanceOf[T]
if( _eid!=null && connected ) {
_group.leave(_eid)
_eid = null
}
}
override protected def onDisconnected {
}
override protected def onConnected {
if( this._state!=null ) {
join(this._state)
}
}
def isMaster:Boolean = this.synchronized {
if(this._state==null)
return false;
_members.get(this._state.id) match {
case Some(nodes) =>
nodes.headOption.map { x=>
x._1 == _eid
}.getOrElse(false)
case None => false
}
}
def master = this.synchronized {
if(this._state==null)
throw new IllegalStateException("Not joined")
_members.get(this._state.id).map(_.head._2)
}
def slaves = this.synchronized {
if(this._state==null)
throw new IllegalStateException("Not joined")
val rc = _members.get(this._state.id).map(_.toList).getOrElse(List())
rc.drop(1).map(_._2)
}
}
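
For reference before removal, a minimal sketch (not part of this commit) of how the election API above was typically driven. The ZooKeeper path and node id are illustrative, and `zk` stands for an already-connected ZKClient from this package.

import org.apache.activemq.leveldb.replicated.groups._

// Sketch only: `zk` is assumed to be an already-connected ZKClient.
object ElectionSketch {
  def run(zk: ZKClient): Unit = {
    val group = ZooKeeperGroupFactory.create(zk, "/example-group") // illustrative path
    val singleton = new ClusteredSingleton(classOf[TextNodeState])
    singleton.start(group)

    val state = new TextNodeState
    state.id = "broker-1"   // nodes sharing an id compete for mastership of it
    singleton.join(state)   // the first joiner with this id becomes the master

    if (singleton.isMaster) {
      // ... act as the master until stopped or disconnected
    }

    singleton.leave
    singleton.stop
    group.close
  }
}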

View File

@ -1,207 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.replicated.groups
import org.apache.zookeeper._
import org.linkedin.zookeeper.tracker._
import scala.collection.mutable.HashMap
import org.linkedin.zookeeper.client.LifecycleListener
import collection.JavaConversions._
import java.util.{LinkedHashMap, Collection}
import org.apache.zookeeper.KeeperException.{ConnectionLossException, NoNodeException}
import scala.Predef._
import scala.Some
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object ZooKeeperGroupFactory {
def create(zk: ZKClient, path: String):ZooKeeperGroup = new ZooKeeperGroup(zk, path)
def members(zk: ZKClient, path: String):LinkedHashMap[String, Array[Byte]] = ZooKeeperGroup.members(zk, path)
}
/**
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object ZooKeeperGroup {
def members(zk: ZKClient, path: String):LinkedHashMap[String, Array[Byte]] = {
var rc = new LinkedHashMap[String, Array[Byte]]
zk.getAllChildren(path).sortWith((a,b)=> a < b).foreach { node =>
try {
if( node.matches("""0\d+""") ) {
rc.put(node, zk.getData(path+"/"+node))
}
} catch {
case e:Throwable =>
e.printStackTrace
}
}
rc
}
}
/**
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ZooKeeperGroup(val zk: ZKClient, val root: String) extends LifecycleListener with ChangeListenerSupport {
var tree = new ZooKeeperTreeTracker[Array[Byte]](zk, new ZKByteArrayDataReader, root, 1)
var rebuildTree = false
val joins = HashMap[String, Int]()
var members = new LinkedHashMap[String, Array[Byte]]
private def member_path_prefix = root + "/0"
zk.registerListener(this)
create(root)
var treeEventHandler = new NodeEventsListener[Array[Byte]]() {
def onEvents(events: Collection[NodeEvent[Array[Byte]]]): Unit = {
if( !closed )
fire_cluster_change;
}
}
tree.track(treeEventHandler)
fire_cluster_change
@volatile
var closed = false
def close = this.synchronized {
closed = true
joins.foreach { case (path, version) =>
try {
if( zk.isConnected ) {
zk.delete(member_path_prefix + path, version)
}
} catch {
case x:NoNodeException => // Already deleted.
}
}
joins.clear
tree.destroy
zk.removeListener(this)
}
def connected = zk.isConnected
def onConnected() = {
this.synchronized {
// underlying ZooKeeperTreeTracker isn't rebuilding itself after
// the loss of the session, so we need to destroy/rebuild it on
// reconnect.
if (rebuildTree) {
tree.destroy
tree = new ZooKeeperTreeTracker[Array[Byte]](zk, new ZKByteArrayDataReader, root, 1)
tree.track(treeEventHandler)
} else {
rebuildTree = true
}
}
fireConnected()
}
def onDisconnected() = {
this.members = new LinkedHashMap()
fireDisconnected()
}
def join(data:Array[Byte]=null): String = this.synchronized {
val id = zk.createWithParents(member_path_prefix, data, CreateMode.EPHEMERAL_SEQUENTIAL).stripPrefix(member_path_prefix)
joins.put(id, 0)
id
}
def update(path:String, data:Array[Byte]=null): Unit = this.synchronized {
joins.get(path) match {
case Some(ver) =>
try {
val stat = zk.setData(member_path_prefix + path, data, ver)
joins.put(path, stat.getVersion)
}
catch {
case e:NoNodeException =>
joins.remove(path)
throw e;
}
case None => throw new NoNodeException("Has not joined locally: "+path)
}
}
def leave(path:String): Unit = this.synchronized {
joins.remove(path).foreach {
case version =>
try {
zk.delete(member_path_prefix + path, version)
} catch {
case x: NoNodeException => // Already deleted.
case x: ConnectionLossException => // disconnected
}
}
}
private def fire_cluster_change: Unit = {
this.synchronized {
val t = tree.getTree.toList.filterNot { x =>
// don't include the root node, or nodes that don't match our naming convention.
(x._1 == root) || !x._1.stripPrefix(root).matches("""/0\d+""")
}
this.members = new LinkedHashMap()
t.sortWith((a,b)=> a._1 < b._1 ).foreach { x=>
this.members.put(x._1.stripPrefix(member_path_prefix), x._2.getData)
}
}
fireChanged()
}
private def create(path: String, count : java.lang.Integer = 0): Unit = {
try {
if (zk.exists(path, false) != null) {
return
}
try {
// try create given path in persistent mode
zk.createOrSetWithParents(path, "", CreateMode.PERSISTENT)
} catch {
case ignore: KeeperException.NodeExistsException =>
}
} catch {
case ignore : KeeperException.SessionExpiredException => {
if (count > 20) {
// we tried enought number of times
throw new IllegalStateException("Cannot create path " + path, ignore)
}
// try to create path with increased counter value
create(path, count + 1)
}
}
}
}
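
A minimal sketch (not part of this commit) of the membership primitive on its own; the path and payloads are illustrative, and `zk` again stands for an already-connected ZKClient.

import org.apache.activemq.leveldb.replicated.groups._

// Sketch only: join, update and leave a group as a single member.
object MembershipSketch {
  def run(zk: ZKClient): Unit = {
    val group = new ZooKeeperGroup(zk, "/example-group")
    val id = group.join("hello".getBytes("UTF-8"))  // ephemeral sequential member
    group.update(id, "updated".getBytes("UTF-8"))   // republish this member's data
    println(group.members)                          // ordered view: member id -> data
    group.leave(id)
    group.close
  }
}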

View File

@ -1,323 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
import java.io._
import org.fusesource.hawtdispatch._
import org.apache.activemq.leveldb.LevelDBClient
import org.fusesource.leveldbjni.internal.Util
import org.apache.activemq.leveldb.util.ProcessSupport._
import java.util.zip.CRC32
object FileSupport {
implicit def toRichFile(file:File):RichFile = new RichFile(file)
val onWindows = System.getProperty("os.name").toLowerCase().startsWith("windows")
private var linkStrategy = 0
private val LOG = Log(getClass)
def link(source:File, target:File):Unit = {
linkStrategy match {
case 0 =>
// We first try to link via a native system call. Fails if
// we cannot load the JNI module.
try {
Util.link(source, target)
} catch {
case e:IOException => throw e
case e:Throwable =>
// Fallback.. to a slower impl..
LOG.debug("Native link system call not available")
linkStrategy = 5
link(source, target)
}
// TODO: consider implementing a case which does the native system call using JNA
case 5 =>
// Next we try to do the link by executing an
// operating system shell command
try {
if( onWindows ) {
system("fsutil", "hardlink", "create", target.getCanonicalPath, source.getCanonicalPath) match {
case(0, _, _) => // Success
case (_, out, err) =>
// TODO: we might want to look at the out/err to see why it failed
// to avoid falling back to the slower strategy.
LOG.debug("fsutil OS command not available either")
linkStrategy = 10
link(source, target)
}
} else {
system("ln", source.getCanonicalPath, target.getCanonicalPath) match {
case(0, _, _) => // Success
case (_, out, err) =>
// TODO: we might want to look at the out/err to see why it failed
// to avoid falling back to the slower strategy.
LOG.debug("ln OS command not available either")
linkStrategy = 2
link(source, target)
}
}
} catch {
case e:Throwable =>
}
case _ =>
// this final strategy is slow but sure to work.
source.copyTo(target)
}
}
def systemDir(name:String) = {
val baseValue = System.getProperty(name)
if( baseValue==null ) {
sys.error("The the %s system property is not set.".format(name))
}
val file = new File(baseValue)
if( !file.isDirectory ) {
sys.error("The the %s system property is not set to valid directory path %s".format(name, baseValue))
}
file
}
case class RichFile(self:File) {
def / (path:String) = new File(self, path)
def linkTo(target:File) = link(self, target)
def copyTo(target:File) = {
using(new FileOutputStream(target)){ os=>
using(new FileInputStream(self)){ is=>
FileSupport.copy(is, os)
}
}
}
def crc32(limit:Long=Long.MaxValue) = {
val checksum = new CRC32();
var remaining = limit;
using(new FileInputStream(self)) { in =>
val data = new Array[Byte](1024*4)
var count = in.read(data, 0, remaining.min(data.length).toInt)
while( count > 0 ) {
remaining -= count
checksum.update(data, 0, count);
count = in.read(data, 0, remaining.min(data.length).toInt)
}
}
checksum.getValue()
}
def cached_crc32 = {
val crc32_file = new File(self.getParentFile, self.getName+".crc32")
if( crc32_file.exists() && crc32_file.lastModified() > self.lastModified() ) {
crc32_file.readText().trim.toLong
} else {
val rc = crc32()
crc32_file.writeText(rc.toString)
rc
}
}
def list_files:Array[File] = {
Option(self.listFiles()).getOrElse(Array())
}
def recursiveList:List[File] = {
if( self.isDirectory ) {
self :: self.listFiles.toList.flatten( _.recursiveList )
} else {
self :: Nil
}
}
def recursiveDelete: Unit = {
if( self.exists ) {
if( self.isDirectory ) {
self.listFiles.foreach(_.recursiveDelete)
}
self.delete
}
}
def recursiveCopyTo(target: File) : Unit = {
if (self.isDirectory) {
target.mkdirs
self.listFiles.foreach( file=> file.recursiveCopyTo( target / file.getName) )
} else {
self.copyTo(target)
}
}
def readText(charset:String="UTF-8"): String = {
using(new FileInputStream(self)) { in =>
FileSupport.readText(in, charset)
}
}
def readBytes: Array[Byte] = {
using(new FileInputStream(self)) { in =>
FileSupport.readBytes(in)
}
}
def writeBytes(data:Array[Byte]):Unit = {
using(new FileOutputStream(self)) { out =>
FileSupport.writeBytes(out, data)
}
}
def writeText(data:String, charset:String="UTF-8"):Unit = {
using(new FileOutputStream(self)) { out =>
FileSupport.writeText(out, data, charset)
}
}
}
/**
* Returns the number of bytes copied.
*/
def copy(in: InputStream, out: OutputStream): Long = {
var bytesCopied: Long = 0
val buffer = new Array[Byte](8192)
var bytes = in.read(buffer)
while (bytes >= 0) {
out.write(buffer, 0, bytes)
bytesCopied += bytes
bytes = in.read(buffer)
}
bytesCopied
}
def using[R,C <: Closeable](closable: C)(proc: C=>R) = {
try {
proc(closable)
} finally {
try { closable.close } catch { case ignore:Throwable => }
}
}
def readText(in: InputStream, charset:String="UTF-8"): String = {
new String(readBytes(in), charset)
}
def readBytes(in: InputStream): Array[Byte] = {
val out = new ByteArrayOutputStream()
copy(in, out)
out.toByteArray
}
def writeText(out: OutputStream, value: String, charset:String="UTF-8"): Unit = {
writeBytes(out, value.getBytes(charset))
}
def writeBytes(out: OutputStream, data: Array[Byte]): Unit = {
copy(new ByteArrayInputStream(data), out)
}
}
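
A minimal sketch (not part of this commit) exercising the RichFile helpers defined above; the paths are illustrative.

import java.io.File
import org.apache.activemq.leveldb.util.FileSupport._

// Sketch only: the implicit RichFile wrapper adds path building and IO helpers.
object FileSupportSketch {
  def main(args: Array[String]): Unit = {
    val dir = new File("target") / "file-support-example" // illustrative path
    dir.mkdirs
    val file = dir / "greeting.txt"
    file.writeText("hello")            // UTF-8 by default
    assert(file.readText() == "hello")
    println(file.crc32())              // CRC32 of the file contents
    dir.recursiveDelete                // removes the whole directory tree
  }
}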
object ProcessSupport {
import FileSupport._
implicit def toRichProcessBuilder(self:ProcessBuilder):RichProcessBuilder = new RichProcessBuilder(self)
case class RichProcessBuilder(self:ProcessBuilder) {
def start(out:OutputStream=null, err:OutputStream=null, in:InputStream=null) = {
self.redirectErrorStream(out == err)
val process = self.start
if( in!=null ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getOutputStream) { out =>
FileSupport.copy(in, out)
}
} catch {
case _ : Throwable =>
}
}
} else {
process.getOutputStream.close
}
if( out!=null ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getInputStream) { in =>
FileSupport.copy(in, out)
}
} catch {
case _ : Throwable =>
}
}
} else {
process.getInputStream.close
}
if( err!=null && err!=out ) {
LevelDBClient.THREAD_POOL {
try {
using(process.getErrorStream) { in =>
FileSupport.copy(in, err)
}
} catch {
case _ : Throwable =>
}
}
} else {
process.getErrorStream.close
}
process
}
}
implicit def toRichProcess(self:Process):RichProcess = new RichProcess(self)
case class RichProcess(self:Process) {
def onExit(func: (Int)=>Unit) = LevelDBClient.THREAD_POOL {
self.waitFor
func(self.exitValue)
}
}
implicit def toProcessBuilder(args:Seq[String]):ProcessBuilder = new ProcessBuilder().command(args : _*)
def launch(command:String*)(func: (Int, Array[Byte], Array[Byte])=>Unit ):Unit = launch(command)(func)
def launch(p:ProcessBuilder, in:InputStream=null)(func: (Int, Array[Byte], Array[Byte]) => Unit):Unit = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
p.start(out, err, in).onExit { code=>
func(code, out.toByteArray, err.toByteArray)
}
}
def system(command:String*):(Int, Array[Byte], Array[Byte]) = system(command)
def system(p:ProcessBuilder, in:InputStream=null):(Int, Array[Byte], Array[Byte]) = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
val process = p.start(out, err, in)
process.waitFor
(process.exitValue, out.toByteArray, err.toByteArray)
}
}
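
A minimal sketch (not part of this commit) of the blocking `system` helper above; the command is illustrative and Unix-specific.

import org.apache.activemq.leveldb.util.ProcessSupport._

// Sketch only: run a command and capture (exit code, stdout, stderr).
object ProcessSupportSketch {
  def main(args: Array[String]): Unit = {
    val (code, out, err) = system("echo", "hello")
    if (code == 0) println(new String(out, "UTF-8"))
    else System.err.println(new String(err, "UTF-8"))
  }
}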

View File

@ -1,60 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
import com.fasterxml.jackson.databind.ObjectMapper
import org.fusesource.hawtbuf.{ByteArrayOutputStream, Buffer}
import java.io.InputStream
/**
*
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object JsonCodec {
final val mapper: ObjectMapper = new ObjectMapper
def decode[T](buffer: Buffer, clazz: Class[T]): T = {
val original = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
try {
return mapper.readValue(buffer.in, clazz)
} finally {
Thread.currentThread.setContextClassLoader(original)
}
}
def decode[T](is: InputStream, clazz : Class[T]): T = {
var original: ClassLoader = Thread.currentThread.getContextClassLoader
Thread.currentThread.setContextClassLoader(this.getClass.getClassLoader)
try {
return JsonCodec.mapper.readValue(is, clazz)
}
finally {
Thread.currentThread.setContextClassLoader(original)
}
}
def encode(value: AnyRef): Buffer = {
var baos = new ByteArrayOutputStream
mapper.writeValue(baos, value)
return baos.toBuffer
}
}
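
A minimal sketch (not part of this commit) of an encode/decode round trip through the codec, using a plain java.util.HashMap.

import org.apache.activemq.leveldb.util.JsonCodec

// Sketch only: AnyRef -> hawtbuf Buffer -> AnyRef round trip.
object JsonCodecSketch {
  def main(args: Array[String]): Unit = {
    val value = new java.util.HashMap[String, String]()
    value.put("id", "broker-1")
    val buffer = JsonCodec.encode(value)
    val copy = JsonCodec.decode(buffer, classOf[java.util.HashMap[String, String]])
    assert(copy.get("id") == "broker-1")
  }
}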

View File

@ -1,146 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
import java.util.concurrent.atomic.AtomicLong
import org.slf4j.{MDC, Logger, LoggerFactory}
import java.lang.{Throwable, String}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object Log {
def apply(clazz:Class[_]):Log = apply(clazz.getName.stripSuffix("$"))
def apply(name:String):Log = new Log {
override val log = LoggerFactory.getLogger(name)
}
def apply(value:Logger):Log = new Log {
override val log = value
}
}
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait Log {
import Log._
val log = LoggerFactory.getLogger(getClass.getName.stripSuffix("$"))
private def format(message:String, args:Seq[Any]) = {
if( args.isEmpty ) {
message
} else {
message.format(args.map(_.asInstanceOf[AnyRef]) : _*)
}
}
def error(m: => String, args:Any*): Unit = {
if( log.isErrorEnabled ) {
log.error(format(m, args.toSeq))
}
}
def error(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isErrorEnabled ) {
log.error(format(m, args.toSeq), e)
}
}
def error(e: Throwable): Unit = {
if( log.isErrorEnabled ) {
log.error(e.getMessage, e)
}
}
def warn(m: => String, args:Any*): Unit = {
if( log.isWarnEnabled ) {
log.warn(format(m, args.toSeq))
}
}
def warn(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isWarnEnabled ) {
log.warn(format(m, args.toSeq), e)
}
}
def warn(e: Throwable): Unit = {
if( log.isWarnEnabled ) {
log.warn(e.toString, e)
}
}
def info(m: => String, args:Any*): Unit = {
if( log.isInfoEnabled ) {
log.info(format(m, args.toSeq))
}
}
def info(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isInfoEnabled ) {
log.info(format(m, args.toSeq), e)
}
}
def info(e: Throwable): Unit = {
if( log.isInfoEnabled ) {
log.info(e.toString, e)
}
}
def debug(m: => String, args:Any*): Unit = {
if( log.isDebugEnabled ) {
log.debug(format(m, args.toSeq))
}
}
def debug(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isDebugEnabled ) {
log.debug(format(m, args.toSeq), e)
}
}
def debug(e: Throwable): Unit = {
if( log.isDebugEnabled ) {
log.debug(e.toString, e)
}
}
def trace(m: => String, args:Any*): Unit = {
if( log.isTraceEnabled ) {
log.trace(format(m, args.toSeq))
}
}
def trace(e: Throwable, m: => String, args:Any*): Unit = {
if( log.isTraceEnabled ) {
log.trace(format(m, args.toSeq), e)
}
}
def trace(e: Throwable): Unit = {
if( log.isTraceEnabled ) {
log.trace(e.toString, e)
}
}
}
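
A minimal sketch (not part of this commit) of how the removed code obtained and used these loggers: messages take String.format-style placeholders and are by-name, so they are only formatted when the level is enabled.

import org.apache.activemq.leveldb.util.Log

// Sketch only: obtain a logger via the companion object.
object LogSketch {
  private val LOG = Log(getClass)
  def main(args: Array[String]): Unit = {
    LOG.info("Opened %s in %d ms", "store", 42L)
    LOG.warn(new IllegalStateException("boom"), "Recovering %s", "store")
  }
}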

View File

@ -1,48 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class LongCounter(private var value:Long = 0) extends Serializable {
def clear() = value=0
def get() = value
def set(value:Long) = this.value = value
def incrementAndGet() = addAndGet(1)
def decrementAndGet() = addAndGet(-1)
def addAndGet(amount:Long) = {
value+=amount
value
}
def getAndIncrement() = getAndAdd(1)
def getAndDecrement() = getAndAdd(-1)
def getAndAdd(amount:Long) = {
val rc = value
value+=amount
rc
}
override def toString() = get().toString
}
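
A minimal sketch (not part of this commit) exercising the counter operations; note it is plain mutable state, unlike java.util.concurrent.atomic.AtomicLong.

import org.apache.activemq.leveldb.util.LongCounter

// Sketch only: basic counter operations.
object LongCounterSketch {
  def main(args: Array[String]): Unit = {
    val counter = new LongCounter()
    counter.incrementAndGet()                  // counter is now 1
    counter.addAndGet(10)                      // counter is now 11
    val previous = counter.getAndDecrement()   // returns 11, counter becomes 10
    assert(previous == 11 && counter.get() == 10)
  }
}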

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.util
case class TimeMetric() {
var max = 0L
def add(duration:Long) = this.synchronized {
max = max.max(duration)
}
def get = {
this.synchronized {
max
} / 1000000.0
}
def reset = {
this.synchronized {
val rc = max
max = 0
rc
} / 1000000.0
}
def apply[T](func: =>T):T = {
val start = System.nanoTime()
try {
func
} finally {
add(System.nanoTime() - start)
}
}
}
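
A minimal sketch (not part of this commit) of timing a block with the metric: durations are measured in nanoseconds and the maximum is reported in milliseconds.

import org.apache.activemq.leveldb.util.TimeMetric

// Sketch only: wrap an operation and read back the max observed latency.
object TimeMetricSketch {
  def main(args: Array[String]): Unit = {
    val writeLatency = TimeMetric()
    val result = writeLatency {
      Thread.sleep(5) // stand-in for the measured operation
      "done"
    }
    println(result)             // "done"
    println(writeLatency.get)   // max duration seen so far, in milliseconds
    println(writeLatency.reset) // returns the max in ms and clears it
  }
}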

View File

@ -1,27 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
<p>
Stub for the LevelDB store implementation from https://github.com/fusesource/fuse-extra/tree/master/fusemq-leveldb
</p>
</body>
</html>

View File

@ -1,306 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.Service;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.leveldb.CountDownFuture;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.replicated.ElectingLevelDBStore;
import org.apache.activemq.store.MessageStore;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import static org.apache.activemq.leveldb.test.ReplicationTestSupport.*;
import static org.junit.Assert.*;
/**
*/
public class ElectingLevelDBStoreTest extends ZooKeeperTestSupport {
protected static final Logger LOG = LoggerFactory.getLogger(ElectingLevelDBStoreTest.class);
ArrayList<ElectingLevelDBStore> stores = new ArrayList<ElectingLevelDBStore>();
ElectingLevelDBStore master = null;
@Ignore("https://issues.apache.org/jira/browse/AMQ-5512")
@Test(timeout = 1000*60*10)
public void testElection() throws Exception {
deleteDirectory("leveldb-node1");
deleteDirectory("leveldb-node2");
deleteDirectory("leveldb-node3");
ArrayList<CountDownFuture> pending_starts = new ArrayList<CountDownFuture>();
for(String dir: new String[]{"leveldb-node1", "leveldb-node2", "leveldb-node3"}) {
ElectingLevelDBStore store = createStoreNode();
store.setDirectory(new File(data_dir(), dir));
stores.add(store);
pending_starts.add(asyncStart(store));
}
// At least one of the stores should have started.
CountDownFuture f = waitFor(30 * 1000, pending_starts.toArray(new CountDownFuture[pending_starts.size()]));
assertTrue(f!=null);
pending_starts.remove(f);
// The other stores should not start..
LOG.info("Making sure the other stores don't start");
Thread.sleep(5000);
for(CountDownFuture start: pending_starts) {
assertFalse(start.completed());
}
// Make sure only one of the stores is reporting to be the master.
for(ElectingLevelDBStore store: stores) {
if( store.isMaster() ) {
assertNull(master);
master = store;
}
}
assertNotNull(master);
// We can work out who the slaves are...
HashSet<ElectingLevelDBStore> slaves = new HashSet<ElectingLevelDBStore>(stores);
slaves.remove(master);
// Start sending messages to the master.
ArrayList<String> expected_list = new ArrayList<String>();
MessageStore ms = master.createQueueMessageStore(new ActiveMQQueue("TEST"));
final int TOTAL = 500;
for (int i = 0; i < TOTAL; i++) {
if (i % ((int) (TOTAL * 0.10)) == 0) {
LOG.info("" + (100 * i / TOTAL) + "% done");
}
if( i == 250 ) {
LOG.info("Checking master state");
assertEquals(expected_list, getMessages(ms));
// midway, let's kill the master..
LOG.info("Killing Master.");
master.stop();
// At least one of the remaining stores should complete starting.
LOG.info("Waiting for slave takeover...");
f = waitFor(60 * 1000, pending_starts.toArray(new CountDownFuture[pending_starts.size()]));
assertTrue(f!=null);
pending_starts.remove(f);
// Make sure one and only one of the slaves becomes the master..
master = null;
for(ElectingLevelDBStore store: slaves) {
if( store.isMaster() ) {
assertNull(master);
master = store;
}
}
assertNotNull(master);
slaves.remove(master);
ms = master.createQueueMessageStore(new ActiveMQQueue("TEST"));
}
String msgid = "m:" + i;
addMessage(ms, msgid);
expected_list.add(msgid);
}
LOG.info("Checking master state");
ArrayList<String> messagesInStore = getMessages(ms);
int index=0;
for (String id: expected_list) {
if (!id.equals(messagesInStore.get(index))) {
LOG.info("Mismatch for expected:" + id + ", got:" + messagesInStore.get(index));
break;
}
index++;
}
assertEquals(expected_list, messagesInStore);
}
@Test(timeout = 1000 * 60 * 10)
public void testZooKeeperServerFailure() throws Exception {
final ArrayList<ElectingLevelDBStore> stores = new ArrayList<ElectingLevelDBStore>();
ArrayList<CountDownFuture> pending_starts = new ArrayList<CountDownFuture>();
for (String dir : new String[]{"leveldb-node1", "leveldb-node2", "leveldb-node3"}) {
ElectingLevelDBStore store = createStoreNode();
store.setDirectory(new File(data_dir(), dir));
stores.add(store);
pending_starts.add(asyncStart(store));
}
// At least one of the stores should have started.
CountDownFuture f = waitFor(30 * 1000, pending_starts.toArray(new CountDownFuture[pending_starts.size()]));
assertTrue(f != null);
pending_starts.remove(f);
// The other stores should not start..
LOG.info("Making sure the other stores don't start");
Thread.sleep(5000);
for (CountDownFuture start : pending_starts) {
assertFalse(start.completed());
}
// Stop ZooKeeper..
LOG.info("SHUTTING DOWN ZooKeeper!");
connector.shutdown();
// None of the stores should remain master...
within( 30, TimeUnit.SECONDS, new Task(){
public void run() throws Exception {
for (ElectingLevelDBStore store : stores) {
assertFalse(store.isMaster());
}
}
});
}
/*
* testAMQ5082 tests the behavior of an ElectingLevelDBStore
* pool when ZooKeeper I/O timeouts occur. See issue AMQ-5082.
*/
@Test(timeout = 1000 * 60 * 5)
public void testAMQ5082() throws Throwable {
final ArrayList<ElectingLevelDBStore> stores = new ArrayList<ElectingLevelDBStore>();
LOG.info("Launching 3 stores");
for (String dir : new String[]{"leveldb-node1", "leveldb-node2", "leveldb-node3"}) {
ElectingLevelDBStore store = createStoreNode();
store.setDirectory(new File(data_dir(), dir));
stores.add(store);
asyncStart(store);
}
LOG.info("Waiting 30s for stores to start");
Thread.sleep(30 * 1000);
LOG.info("Checking for a single master");
ElectingLevelDBStore master = null;
for (ElectingLevelDBStore store: stores) {
if (store.isMaster()) {
assertNull(master);
master = store;
}
}
assertNotNull(master);
LOG.info("Imposing 1s I/O wait on Zookeeper connections, waiting 30s to confirm that quorum is not lost");
this.connector.testHandle.setIOWaitMillis(1 * 1000, 30 * 1000);
LOG.info("Confirming that the quorum has not been lost");
for (ElectingLevelDBStore store: stores) {
if (store.isMaster()) {
assertTrue(master == store);
}
}
LOG.info("Imposing 11s I/O wait on Zookeeper connections, waiting 30s for quorum to be lost");
this.connector.testHandle.setIOWaitMillis(11 * 1000, 60 * 1000);
LOG.info("Confirming that the quorum has been lost");
for (ElectingLevelDBStore store: stores) {
assertFalse(store.isMaster());
}
master = null;
LOG.info("Lifting I/O wait on Zookeeper connections, waiting 30s for quorum to be re-established");
this.connector.testHandle.setIOWaitMillis(0, 30 * 1000);
LOG.info("Checking for a single master");
for (ElectingLevelDBStore store: stores) {
if (store.isMaster()) {
assertNull(master);
master = store;
}
}
assertNotNull(master);
}
@After
public void stop() throws Exception {
if (master != null) {
master.stop();
FileUtils.deleteDirectory(master.directory());
}
for(ElectingLevelDBStore store: stores) {
store.stop();
FileUtils.deleteDirectory(store.directory());
}
stores.clear();
}
private CountDownFuture asyncStart(final Service service) {
final CountDownFuture<Throwable> f = new CountDownFuture<Throwable>();
LevelDBStore.BLOCKING_EXECUTOR().execute(new Runnable() {
public void run() {
try {
service.start();
f.set(null);
} catch (Throwable e) {
e.printStackTrace();
f.set(e);
}
}
});
return f;
}
private CountDownFuture asyncStop(final Service service) {
final CountDownFuture<Throwable> f = new CountDownFuture<Throwable>();
LevelDBStore.BLOCKING_EXECUTOR().execute(new Runnable() {
public void run() {
try {
service.stop();
f.set(null);
} catch (Throwable e) {
e.printStackTrace();
f.set(e);
}
}
});
return f;
}
private ElectingLevelDBStore createStoreNode() {
ElectingLevelDBStore store = new ElectingLevelDBStore();
store.setSecurityToken("foo");
store.setLogSize(1024 * 200);
store.setReplicas(2);
store.setSync("quorum_disk");
store.setZkSessionTimeout("15s");
store.setZkAddress("localhost:" + connector.getLocalPort());
store.setZkPath("/broker-stores");
store.setBrokerName("foo");
store.setHostname("localhost");
store.setBind("tcp://0.0.0.0:0");
return store;
}
}

View File

@ -1,128 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.MessageId;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.LevelDBStoreView;
import org.apache.activemq.leveldb.util.FileSupport;
import org.apache.activemq.store.MessageStore;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.activemq.leveldb.test.ReplicationTestSupport.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class IndexRebuildTest {
protected static final Logger LOG = LoggerFactory.getLogger(IndexRebuildTest.class);
final int max = 30;
final int toLeave = 5;
ArrayList<LevelDBStore> stores = new ArrayList<LevelDBStore>();
@Test(timeout = 1000 * 60 * 10)
public void testRebuildIndex() throws Exception {
File masterDir = new File("target/activemq-data/leveldb-rebuild");
FileSupport.toRichFile(masterDir).recursiveDelete();
final LevelDBStore store = new LevelDBStore();
store.setDirectory(masterDir);
store.setLogDirectory(masterDir);
store.setLogSize(1024 * 10);
store.start();
stores.add(store);
ArrayList<MessageId> inserts = new ArrayList<MessageId>();
MessageStore ms = store.createQueueMessageStore(new ActiveMQQueue("TEST"));
for (int i = 0; i < max; i++) {
inserts.add(addMessage(ms, "m" + i).getMessageId());
}
int logFileCount = countLogFiles(store);
assertTrue("more than one journal file", logFileCount > 1);
for (MessageId id : inserts.subList(0, inserts.size() - toLeave)) {
removeMessage(ms, id);
}
LevelDBStoreView view = new LevelDBStoreView(store);
view.compact();
int reducedLogFileCount = countLogFiles(store);
assertTrue("log files deleted", logFileCount > reducedLogFileCount);
store.stop();
deleteTheIndex(store);
assertEquals("log files remain", reducedLogFileCount, countLogFiles(store));
// restart, recover and verify message read
store.start();
ms = store.createQueueMessageStore(new ActiveMQQueue("TEST"));
assertEquals(toLeave + " messages remain", toLeave, getMessages(ms).size());
}
private void deleteTheIndex(LevelDBStore store) throws IOException {
for (String index : store.getLogDirectory().list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
LOG.info("dir:" + dir + ", name: " + name);
return (name != null && name.endsWith(".index"));
}
})) {
File file = new File(store.getLogDirectory().getAbsoluteFile(), index);
LOG.info("Deleting index directory:" + file);
FileUtils.deleteDirectory(file);
}
}
private int countLogFiles(LevelDBStore store) {
return store.getLogDirectory().list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
LOG.info("dir:" + dir + ", name: " + name);
return (name != null && name.endsWith(".log"));
}
}).length;
}
@After
public void stop() throws Exception {
for (LevelDBStore store : stores) {
if (store.isStarted()) {
store.stop();
}
FileUtils.deleteDirectory(store.directory());
}
stores.clear();
}
}

View File

@ -1,103 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.leveldb.replicated.MasterLevelDBStore;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Test;
import java.io.File;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URI;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import static org.junit.Assert.*;
/**
* @author <a href="http://www.christianposta.com/blog">Christian Posta</a>
*/
public class MasterLevelDBStoreTest {
MasterLevelDBStore store;
@Test(timeout = 1000*60*10)
public void testStoppingStoreStopsTransport() throws Exception {
store = new MasterLevelDBStore();
store.setDirectory(new File("target/activemq-data/master-leveldb-store-test"));
store.setReplicas(0);
ExecutorService threads = Executors.newFixedThreadPool(1);
threads.execute(new Runnable() {
@Override
public void run() {
try {
store.start();
} catch (Exception e) {
e.printStackTrace();
}
}
});
// give some time to come up..
Thread.sleep(2000);
String address = store.transport_server().getBoundAddress();
URI bindAddress = new URI(address);
System.out.println(address);
Socket socket = new Socket();
try {
socket.bind(new InetSocketAddress(bindAddress.getHost(), bindAddress.getPort()));
fail("We should not have been able to connect...");
} catch (BindException e) {
System.out.println("Good. We cannot bind.");
}
threads.execute(new Runnable() {
@Override
public void run() {
try {
store.stop();
} catch (Exception e) {
e.printStackTrace();
}
}
});
Thread.sleep(2000);
try {
socket.bind(new InetSocketAddress(bindAddress.getHost(), bindAddress.getPort()));
System.out.println("Can bind, so protocol server must have been shut down.");
} catch (IllegalStateException e) {
fail("Server protocol port is still opened..");
}
}
@After
public void stop() throws Exception {
if (store.isStarted()) {
store.stop();
FileUtils.deleteQuietly(store.directory());
}
}
}

View File

@ -1,458 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.TransportConnector;
import org.apache.activemq.leveldb.replicated.ElectingLevelDBStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import javax.jms.*;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
/**
* Holds broker unit tests of the replicated leveldb store.
*/
public class ReplicatedLevelDBBrokerTest extends ZooKeeperTestSupport {
protected static final Logger LOG = LoggerFactory.getLogger(ReplicatedLevelDBBrokerTest.class);
final SynchronousQueue<BrokerService> masterQueue = new SynchronousQueue<BrokerService>();
ArrayList<BrokerService> brokers = new ArrayList<BrokerService>();
/**
* Tries to replicate the problem reported at:
* https://issues.apache.org/jira/browse/AMQ-4837
*/
@Ignore("https://issues.apache.org/jira/browse/AMQ-5512")
@Test(timeout = 1000*60*10)
public void testAMQ4837viaJMS() throws Throwable {
testAMQ4837(false);
}
/**
* Tries to replicate the problem reported at:
* https://issues.apache.org/jira/browse/AMQ-4837
*/
@Ignore("https://issues.apache.org/jira/browse/AMQ-5512")
@Test(timeout = 1000*60*10)
public void testAMQ4837viaJMX() throws Throwable {
for (int i = 0; i < 2; i++) {
LOG.info("testAMQ4837viaJMX - Iteration: " + i);
resetDataDirs();
testAMQ4837(true);
stopBrokers();
}
}
@Before
public void resetDataDirs() throws IOException {
deleteDirectory("node-1");
deleteDirectory("node-2");
deleteDirectory("node-3");
}
public interface Client{
public void execute(Connection connection) throws Exception;
}
protected Thread startFailoverClient(String name, final Client client) throws IOException, URISyntaxException {
String url = "failover://(tcp://localhost:"+port+")?maxReconnectDelay=500&nested.wireFormat.maxInactivityDuration=1000";
final ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(url);
Thread rc = new Thread(name) {
@Override
public void run() {
Connection connection = null;
try {
connection = factory.createConnection();
client.execute(connection);
} catch (Throwable e) {
e.printStackTrace();
} finally {
try {
connection.close();
} catch (JMSException e) {
}
}
}
};
rc.start();
return rc;
}
@Test
@Ignore
public void testReplicationQuorumLoss() throws Throwable {
System.out.println("======================================");
System.out.println(" Start 2 ActiveMQ nodes.");
System.out.println("======================================");
startBrokerAsync(createBrokerNode("node-1", port));
startBrokerAsync(createBrokerNode("node-2", port));
BrokerService master = waitForNextMaster();
System.out.println("======================================");
System.out.println(" Start the producer and consumer");
System.out.println("======================================");
final AtomicBoolean stopClients = new AtomicBoolean(false);
final ArrayBlockingQueue<String> errors = new ArrayBlockingQueue<String>(100);
final AtomicLong receivedCounter = new AtomicLong();
final AtomicLong sentCounter = new AtomicLong();
Thread producer = startFailoverClient("producer", new Client() {
@Override
public void execute(Connection connection) throws Exception {
Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
MessageProducer producer = session.createProducer(session.createQueue("test"));
long actual = 0;
while(!stopClients.get()) {
TextMessage msg = session.createTextMessage("Hello World");
msg.setLongProperty("id", actual++);
producer.send(msg);
sentCounter.incrementAndGet();
}
}
});
Thread consumer = startFailoverClient("consumer", new Client() {
@Override
public void execute(Connection connection) throws Exception {
connection.start();
Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
MessageConsumer consumer = session.createConsumer(session.createQueue("test"));
long expected = 0;
while(!stopClients.get()) {
Message msg = consumer.receive(200);
if( msg!=null ) {
long actual = msg.getLongProperty("id");
if( actual != expected ) {
errors.offer("Received got unexpected msg id: "+actual+", expected: "+expected);
}
msg.acknowledge();
expected = actual+1;
receivedCounter.incrementAndGet();
}
}
}
});
try {
assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
assertNull(errors.poll());
System.out.println("======================================");
System.out.println(" Master should stop once the quorum is lost.");
System.out.println("======================================");
ArrayList<BrokerService> stopped = stopSlaves();// stopping the slaves should kill the quorum.
assertStopsWithin(master, 10, TimeUnit.SECONDS);
assertNull(errors.poll()); // clients should not see an error since they are failover clients.
stopped.add(master);
System.out.println("======================================");
System.out.println(" Restart the slave. Clients should make progress again..");
System.out.println("======================================");
startBrokersAsync(createBrokerNodes(stopped));
assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
assertNull(errors.poll());
} catch (Throwable e) {
e.printStackTrace();
throw e;
} finally {
// Wait for the clients to stop..
stopClients.set(true);
producer.join();
consumer.join();
}
}
protected void startBrokersAsync(ArrayList<BrokerService> brokers) {
for (BrokerService broker : brokers) {
startBrokerAsync(broker);
}
}
protected ArrayList<BrokerService> createBrokerNodes(ArrayList<BrokerService> brokers) throws Exception {
ArrayList<BrokerService> rc = new ArrayList<BrokerService>();
for (BrokerService b : brokers) {
rc.add(createBrokerNode(b.getBrokerName(), connectPort(b)));
}
return rc;
}
protected ArrayList<BrokerService> stopSlaves() throws Exception {
ArrayList<BrokerService> rc = new ArrayList<BrokerService>();
for (BrokerService broker : brokers) {
if( broker.isSlave() ) {
System.out.println("Stopping slave: "+broker.getBrokerName());
broker.stop();
broker.waitUntilStopped();
rc.add(broker);
}
}
brokers.removeAll(rc);
return rc;
}
protected void assertStopsWithin(final BrokerService master, int timeout, TimeUnit unit) throws InterruptedException {
within(timeout, unit, new Task(){
@Override
public void run() throws Exception {
assertTrue(master.isStopped());
}
});
}
protected void assertCounterMakesProgress(final AtomicLong counter, int timeout, TimeUnit unit) throws InterruptedException {
final long initial = counter.get();
within(timeout, unit, new Task(){
public void run() throws Exception {
assertTrue(initial < counter.get());
}
});
}
public void testAMQ4837(boolean jmx) throws Throwable {
try {
System.out.println("======================================");
System.out.println("1. Start 3 activemq nodes.");
System.out.println("======================================");
startBrokerAsync(createBrokerNode("node-1"));
startBrokerAsync(createBrokerNode("node-2"));
startBrokerAsync(createBrokerNode("node-3"));
BrokerService master = waitForNextMaster();
System.out.println("======================================");
System.out.println("2. Push a message to the master and browse the queue");
System.out.println("======================================");
sendMessage(master, pad("Hello World #1", 1024));
assertEquals(1, browseMessages(master, jmx).size());
System.out.println("======================================");
System.out.println("3. Stop master node");
System.out.println("======================================");
stop(master);
BrokerService prevMaster = master;
master = waitForNextMaster();
System.out.println("======================================");
System.out.println("4. Push a message to the new master and browse the queue. Message summary and queue content ok.");
System.out.println("======================================");
assertEquals(1, browseMessages(master, jmx).size());
sendMessage(master, pad("Hello World #2", 1024));
assertEquals(2, browseMessages(master, jmx).size());
System.out.println("======================================");
System.out.println("5. Restart the stopped node & 6. stop current master");
System.out.println("======================================");
brokers.remove(prevMaster);
prevMaster = createBrokerNode(prevMaster.getBrokerName());
startBrokerAsync(prevMaster);
stop(master);
master = waitForNextMaster();
System.out.println("======================================");
System.out.println("7. Browse the queue on new master");
System.out.println("======================================");
assertEquals(2, browseMessages(master, jmx).size());
} catch (Throwable e) {
e.printStackTrace();
throw e;
}
}
private void stop(BrokerService master) throws Exception {
System.out.println("Stopping "+master.getBrokerName());
master.stop();
master.waitUntilStopped();
}
private BrokerService waitForNextMaster() throws InterruptedException {
System.out.println("Wait for master to start up...");
BrokerService master = masterQueue.poll(60, TimeUnit.SECONDS);
assertNotNull("Master elected", master);
assertFalse(master.isSlave());
assertNull("Only one master elected at a time..", masterQueue.peek());
System.out.println("Master started: " + master.getBrokerName());
return master;
}
private String pad(String value, int size) {
while( value.length() < size ) {
value += " ";
}
return value;
}
private void startBrokerAsync(BrokerService b) {
final BrokerService broker = b;
new Thread("Starting broker node: "+b.getBrokerName()){
@Override
public void run() {
try {
broker.start();
broker.waitUntilStarted();
masterQueue.put(broker);
} catch (Exception e) {
e.printStackTrace();
}
}
}.start();
}
private void sendMessage(BrokerService brokerService, String body) throws Exception {
TransportConnector connector = brokerService.getTransportConnectors().get(0);
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(connector.getConnectUri());
Connection connection = factory.createConnection();
try {
connection.start();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
MessageProducer producer = session.createProducer(session.createQueue("FOO"));
producer.send(session.createTextMessage(body));
} finally {
connection.close();
}
}
private ArrayList<String> browseMessages(BrokerService brokerService, boolean jmx) throws Exception {
if( jmx ) {
return browseMessagesViaJMX(brokerService);
} else {
return browseMessagesViaJMS(brokerService);
}
}
private ArrayList<String> browseMessagesViaJMX(BrokerService brokerService) throws Exception {
ArrayList<String> rc = new ArrayList<String>();
ObjectName on = new ObjectName("org.apache.activemq:type=Broker,brokerName="+brokerService.getBrokerName()+",destinationType=Queue,destinationName=FOO");
CompositeData[] browse = (CompositeData[]) ManagementFactory.getPlatformMBeanServer().invoke(on, "browse", null, null);
for (CompositeData cd : browse) {
rc.add(cd.get("Text").toString()) ;
}
return rc;
}
private ArrayList<String> browseMessagesViaJMS(BrokerService brokerService) throws Exception {
ArrayList<String> rc = new ArrayList<String>();
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:"+ connectPort(brokerService));
Connection connection = factory.createConnection();
try {
connection.start();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
QueueBrowser browser = session.createBrowser(session.createQueue("FOO"));
Enumeration enumeration = browser.getEnumeration();
while (enumeration.hasMoreElements()) {
TextMessage textMessage = (TextMessage) enumeration.nextElement();
rc.add(textMessage.getText());
}
} finally {
connection.close();
}
return rc;
}
private int connectPort(BrokerService brokerService) throws IOException, URISyntaxException {
TransportConnector connector = brokerService.getTransportConnectors().get(0);
return connector.getConnectUri().getPort();
}
int port;
@Before
public void findFreePort() throws Exception {
ServerSocket socket = new ServerSocket(0);
port = socket.getLocalPort();
socket.close();
}
@After
public void stopBrokers() throws Exception {
for (BrokerService broker : brokers) {
try {
stop(broker);
} catch (Exception e) {
}
}
brokers.clear();
resetDataDirs();
}
private BrokerService createBrokerNode(String id) throws Exception {
return createBrokerNode(id, 0);
}
private BrokerService createBrokerNode(String id, int port) throws Exception {
BrokerService bs = new BrokerService();
bs.getManagementContext().setCreateConnector(false);
brokers.add(bs);
bs.setBrokerName(id);
bs.setPersistenceAdapter(createStoreNode(id));
TransportConnector connector = new TransportConnector();
connector.setUri(new URI("tcp://0.0.0.0:" + port));
bs.addConnector(connector);
return bs;
}
private ElectingLevelDBStore createStoreNode(String id) {
// This little hack is in here because we give each of the 3 brokers
// different broker names so they can show up in JMX correctly,
// but the store needs to be configured with the same broker name
// so that they can find each other in ZK properly.
ElectingLevelDBStore store = new ElectingLevelDBStore() {
@Override
public void start() throws Exception {
this.setBrokerName("localhost");
super.start();
}
};
store.setDirectory(new File(data_dir(), id));
store.setContainer(id);
store.setReplicas(3);
store.setSync("quorum_disk");
store.setZkAddress("localhost:" + connector.getLocalPort());
store.setZkSessionTimeout("15s");
store.setHostname("localhost");
store.setBind("tcp://0.0.0.0:0");
return store;
}
}

View File

@ -1,309 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.Service;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.leveldb.CountDownFuture;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.replicated.ElectingLevelDBStore;
import org.apache.activemq.leveldb.replicated.MasterLevelDBStore;
import org.apache.activemq.leveldb.replicated.SlaveLevelDBStore;
import org.apache.activemq.leveldb.util.FileSupport;
import org.apache.activemq.store.MessageStore;
import org.apache.commons.io.FileUtils;
import org.fusesource.hawtdispatch.transport.TcpTransport;
import org.junit.After;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.concurrent.TimeUnit;
import static org.apache.activemq.leveldb.test.ReplicationTestSupport.addMessage;
import static org.apache.activemq.leveldb.test.ReplicationTestSupport.createPayload;
import static org.apache.activemq.leveldb.test.ReplicationTestSupport.getMessages;
import static org.junit.Assert.*;
/**
*/
public class ReplicatedLevelDBStoreTest {
protected static final Logger LOG = LoggerFactory.getLogger(ReplicatedLevelDBStoreTest.class);
ArrayList<LevelDBStore> stores = new ArrayList<LevelDBStore>();
@Test(timeout = 1000*60*10)
public void testMinReplicaEnforced() throws Exception {
File masterDir = new File("target/activemq-data/leveldb-node1");
File slaveDir = new File("target/activemq-data/leveldb-node2");
FileSupport.toRichFile(masterDir).recursiveDelete();
FileSupport.toRichFile(slaveDir).recursiveDelete();
final MasterLevelDBStore master = createMaster(masterDir);
master.setReplicas(2);
CountDownFuture masterStartLatch = asyncStart(master);
stores.add(master);
// Starting the store should not complete since we don't have enough
// replicas.
assertFalse(masterStartLatch.await(2, TimeUnit.SECONDS));
// Adding a slave should allow the master startup to complete.
SlaveLevelDBStore slave = createSlave(master, slaveDir);
slave.start();
stores.add(slave);
assertTrue(masterStartLatch.await(2, TimeUnit.SECONDS));
// New updates should complete quickly now..
MessageStore ms = master.createQueueMessageStore(new ActiveMQQueue("TEST"));
CountDownFuture f = asyncAddMessage(ms, "m1");
assertTrue(f.await(1, TimeUnit.SECONDS));
// If the slave goes offline, then updates should once again
// not complete.
slave.stop();
f = asyncAddMessage(ms, "m2");
assertFalse(f.await(2, TimeUnit.SECONDS));
// Restart and the op should complete.
slave = createSlave(master, slaveDir);
slave.start();
assertTrue(f.await(2, TimeUnit.SECONDS));
master.stop();
slave.stop();
}
private CountDownFuture asyncAddMessage(final MessageStore ms, final String body) {
final CountDownFuture<Throwable> f = new CountDownFuture<Throwable>();
LevelDBStore.BLOCKING_EXECUTOR().execute(new Runnable() {
public void run() {
try {
addMessage(ms, body);
f.set(null);
} catch (Throwable e) {
f.set(e);
}
}
});
return f;
}
private CountDownFuture asyncStart(final Service service) {
final CountDownFuture<Throwable> f = new CountDownFuture<Throwable>();
LevelDBStore.BLOCKING_EXECUTOR().execute(new Runnable() {
public void run() {
try {
service.start();
f.set(null);
} catch (Throwable e) {
f.set(e);
}
}
});
return f;
}
@Test(timeout = 1000*60*10)
public void testReplication() throws Exception {
LinkedList<File> directories = new LinkedList<File>();
directories.add(new File("target/activemq-data/leveldb-node1"));
directories.add(new File("target/activemq-data/leveldb-node2"));
directories.add(new File("target/activemq-data/leveldb-node3"));
resetDirectories(directories);
// For some reason this had to be 64k to trigger a bug where
// slave index snapshots were being done incorrectly.
String payload = createPayload(64*1024);
ArrayList<String> expected_list = new ArrayList<String>();
// We will rotate the task of being the master between the 3 nodes.
for (int j = 0; j < 5; j++) {
MasterLevelDBStore master = createMaster(directories.get(0));
CountDownFuture masterStart = asyncStart(master);
SlaveLevelDBStore slave1 = createSlave(master, directories.get(1));
SlaveLevelDBStore slave2 = createSlave(master, directories.get(2));
asyncStart(slave2);
masterStart.await();
if (j == 0) {
stores.add(master);
stores.add(slave1);
stores.add(slave2);
}
MessageStore ms = master.createQueueMessageStore(new ActiveMQQueue("TEST"));
LOG.info("Checking: "+master.getDirectory());
assertEquals(expected_list, getMessages(ms));
LOG.info("Adding messages...");
final int TOTAL = 500;
for (int i = 0; i < TOTAL; i++) {
if (i % ((int) (TOTAL * 0.10)) == 0) {
LOG.info("" + (100 * i / TOTAL) + "% done");
}
if (i == 250) {
slave1.start();
slave2.stop();
LOG.info("Checking: "+master.getDirectory());
assertEquals(expected_list, getMessages(ms));
}
String msgid = "m:" + j + ":" + i;
addMessage(ms, msgid, payload);
expected_list.add(msgid);
}
LOG.info("Checking: "+master.getDirectory());
assertEquals(expected_list, getMessages(ms));
LOG.info("Stopping master: " + master.getDirectory());
master.stop();
Thread.sleep(3*1000);
LOG.info("Stopping slave: " + slave1.getDirectory());
slave1.stop();
// Rotate the dir order so that slave1 becomes the master next.
directories.addLast(directories.removeFirst());
}
}
void resetDirectories(LinkedList<File> directories) {
for (File directory : directories) {
FileSupport.toRichFile(directory).recursiveDelete();
directory.mkdirs();
FileSupport.toRichFile(new File(directory, "nodeid.txt")).writeText(directory.getName(), "UTF-8");
}
}
@Test(timeout = 1000*60*10)
public void testSlowSlave() throws Exception {
LinkedList<File> directories = new LinkedList<File>();
directories.add(new File("target/activemq-data/leveldb-node1"));
directories.add(new File("target/activemq-data/leveldb-node2"));
directories.add(new File("target/activemq-data/leveldb-node3"));
resetDirectories(directories);
File node1Dir = directories.get(0);
File node2Dir = directories.get(1);
File node3Dir = directories.get(2);
ArrayList<String> expected_list = new ArrayList<String>();
MasterLevelDBStore node1 = createMaster(node1Dir);
stores.add(node1);
CountDownFuture masterStart = asyncStart(node1);
// Let's create one slow slave...
SlaveLevelDBStore node2 = new SlaveLevelDBStore() {
boolean hitOnce = false;
@Override
public TcpTransport create_transport() {
if( hitOnce ) {
return super.create_transport();
}
hitOnce = true;
TcpTransport transport = super.create_transport();
transport.setMaxReadRate(64*1024);
return transport;
}
};
stores.add(node2);
configureSlave(node2, node1, node2Dir);
SlaveLevelDBStore node3 = createSlave(node1, node3Dir);
stores.add(node3);
asyncStart(node2);
asyncStart(node3);
masterStart.await();
LOG.info("Adding messages...");
String payload = createPayload(64 * 1024);
MessageStore ms = node1.createQueueMessageStore(new ActiveMQQueue("TEST"));
final int TOTAL = 10;
for (int i = 0; i < TOTAL; i++) {
if (i == 8) {
// Stop the fast slave so that we wait for the slow slave to
// catch up..
node3.stop();
}
String msgid = "m:" + ":" + i;
addMessage(ms, msgid, payload);
expected_list.add(msgid);
}
LOG.info("Checking node1 state");
assertEquals(expected_list, getMessages(ms));
LOG.info("Stopping node1: " + node1.node_id());
node1.stop();
LOG.info("Stopping slave: " + node2.node_id());
node2.stop();
}
@After
public void stop() throws Exception {
for(LevelDBStore store: stores) {
if (store.isStarted()) {
store.stop();
}
FileUtils.deleteDirectory(store.directory());
}
stores.clear();
}
private SlaveLevelDBStore createSlave(MasterLevelDBStore master, File directory) {
SlaveLevelDBStore slave = new SlaveLevelDBStore();
configureSlave(slave, master, directory);
return slave;
}
private SlaveLevelDBStore configureSlave(SlaveLevelDBStore slave, MasterLevelDBStore master, File directory) {
slave.setDirectory(directory);
slave.setConnect("tcp://127.0.0.1:" + master.getPort());
slave.setSecurityToken("foo");
slave.setLogSize(1023 * 200);
return slave;
}
private MasterLevelDBStore createMaster(File directory) {
MasterLevelDBStore master = new MasterLevelDBStore();
master.setDirectory(directory);
master.setBind("tcp://0.0.0.0:0");
master.setSecurityToken("foo");
master.setReplicas(2);
master.setLogSize(1023 * 200);
return master;
}
}
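
The asyncStart/asyncAddMessage helpers wrap a blocking call in a future so the test can assert on startup and replication timing. The same pattern expressed with the plain JDK, as a sketch (CompletableFuture stands in for the store's CountDownFuture; the executor is an assumption):

    // Sketch: run Service.start() on an executor, completing with null on
    // success or with the Throwable on failure, mirroring asyncStart above.
    static CompletableFuture<Throwable> asyncStart(Service service, ExecutorService executor) {
        CompletableFuture<Throwable> f = new CompletableFuture<>();
        executor.execute(() -> {
            try {
                service.start();
                f.complete(null);
            } catch (Throwable e) {
                f.complete(e);
            }
        });
        return f;
    }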

View File

@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import java.io.IOException;
import java.util.ArrayList;
import javax.jms.JMSException;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.command.ActiveMQTextMessage;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.MessageId;
import org.apache.activemq.store.MessageRecoveryListener;
import org.apache.activemq.store.MessageStore;
/**
*/
public class ReplicationTestSupport {
static long id_counter = 0L;
static String payload = createPayload(1024);
public static String createPayload(int size) {
StringBuilder sb = new StringBuilder(size);
for (int i = 0; i < size; i++) {
sb.append('x');
}
return sb.toString();
}
static public ActiveMQTextMessage addMessage(MessageStore ms, String id) throws JMSException, IOException {
return addMessage(ms, id, payload);
}
static public ActiveMQTextMessage addMessage(MessageStore ms, String id, String payload) throws JMSException, IOException {
ActiveMQTextMessage message = new ActiveMQTextMessage();
message.setPersistent(true);
message.setResponseRequired(true);
message.setStringProperty("id", id);
message.setText(payload);
id_counter += 1;
MessageId messageId = new MessageId("ID:localhost-56913-1254499826208-0:0:1:1:" + id_counter);
messageId.setBrokerSequenceId(id_counter);
message.setMessageId(messageId);
ms.addMessage(new ConnectionContext(), message);
return message;
}
static public void removeMessage(MessageStore ms, MessageId messageId) throws JMSException, IOException {
MessageAck ack = new MessageAck();
ack.setAckType(MessageAck.INDIVIDUAL_ACK_TYPE);
ack.setFirstMessageId(messageId);
ack.setLastMessageId(messageId);
ms.removeMessage(new ConnectionContext(), ack);
}
static public ArrayList<String> getMessages(MessageStore ms) throws Exception {
final ArrayList<String> rc = new ArrayList<String>();
ms.recover(new MessageRecoveryListener() {
public boolean recoverMessage(Message message) throws Exception {
rc.add(((ActiveMQTextMessage) message).getStringProperty("id"));
return true;
}
public boolean hasSpace() {
return true;
}
public boolean recoverMessageReference(MessageId ref) throws Exception {
return true;
}
public boolean isDuplicate(MessageId ref) {
return false;
}
});
return rc;
}
}
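
A typical round trip through these helpers, given an already-created MessageStore ms (a sketch, not an existing test):

    // Sketch: add a message, recover its id, then ack it away.
    ActiveMQTextMessage m = ReplicationTestSupport.addMessage(ms, "m1");
    ArrayList<String> ids = ReplicationTestSupport.getMessages(ms); // now contains "m1"
    ReplicationTestSupport.removeMessage(ms, m.getMessageId());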

View File

@ -1,119 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.leveldb.CountDownFuture;
import org.apache.activemq.leveldb.util.FileSupport;
import org.apache.commons.io.FileUtils;
import org.apache.zookeeper.server.TestServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
import org.junit.After;
import org.junit.Before;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.concurrent.TimeUnit;
/**
* Created by chirino on 10/30/13.
*/
public class ZooKeeperTestSupport {
protected TestServerCnxnFactory connector;
static File data_dir() {
return new File("target/activemq-data/leveldb-elections");
}
@Before
public void startZooKeeper() throws Exception {
FileSupport.toRichFile(data_dir()).recursiveDelete();
System.out.println("Starting ZooKeeper");
ZooKeeperServer zk_server = new ZooKeeperServer();
zk_server.setTickTime(500);
zk_server.setTxnLogFactory(new FileTxnSnapLog(new File(data_dir(), "zk-log"), new File(data_dir(), "zk-data")));
connector = new TestServerCnxnFactory();
connector.configure(new InetSocketAddress(0), 100);
connector.startup(zk_server);
System.out.println("ZooKeeper started");
}
@After
public void stopZooKeeper() throws Exception {
if( connector!=null ) {
connector.shutdown();
connector = null;
}
deleteDirectory("zk-log");
deleteDirectory("zk-data");
}
protected static interface Task {
public void run() throws Exception;
}
protected void within(int time, TimeUnit unit, Task task) throws InterruptedException {
long timeMS = unit.toMillis(time);
long deadline = System.currentTimeMillis() + timeMS;
while (true) {
try {
task.run();
return;
} catch (Throwable e) {
long remaining = deadline - System.currentTimeMillis();
if( remaining <=0 ) {
if( e instanceof RuntimeException ) {
throw (RuntimeException)e;
}
if( e instanceof Error ) {
throw (Error)e;
}
throw new RuntimeException(e);
}
Thread.sleep(Math.min(timeMS/10, remaining));
}
}
}
protected CountDownFuture waitFor(int timeout, CountDownFuture... futures) throws InterruptedException {
long deadline = System.currentTimeMillis()+timeout;
while( true ) {
for (CountDownFuture f:futures) {
if( f.await(1, TimeUnit.MILLISECONDS) ) {
return f;
}
}
long remaining = deadline - System.currentTimeMillis();
if( remaining < 0 ) {
return null;
} else {
Thread.sleep(Math.min(remaining / 10, 100L));
}
}
}
protected void deleteDirectory(String s) throws java.io.IOException {
try {
FileUtils.deleteDirectory(new File(data_dir(), s));
} catch (java.io.IOException e) {
// ignored: directory may already be gone
}
}
}
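
The within helper retries a task until it stops throwing or the deadline passes, sleeping a tenth of the timeout between attempts. Typical use from a subclass looks like this sketch (the assertion itself is hypothetical):

    // Sketch: keep retrying an assertion for up to 30 seconds.
    within(30, TimeUnit.SECONDS, new Task() {
        public void run() throws Exception {
            assertEquals(1, brokers.size()); // hypothetical condition under test
        }
    });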

View File

@ -1,101 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zookeeper.server;
import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* TestServerCnxnFactory allows a caller to impose an artificial
* wait on I/O over the ServerCnxn used to communicate with the
* ZooKeeper server.
*/
public class TestServerCnxnFactory extends NIOServerCnxnFactory {
protected static final Logger LOG = LoggerFactory.getLogger(TestServerCnxnFactory.class);
/* testHandle controls whether or not an artificial wait
* is imposed when talking to the ZooKeeper server
*/
public TestHandle testHandle = new TestHandle();
public TestServerCnxnFactory() throws IOException {
super();
}
protected NIOServerCnxn createConnection(SocketChannel sock, SelectionKey sk) throws IOException {
return new TestServerCnxn(this.zkServer, sock, sk, this, testHandle);
}
/*
* TestHandle is handed to TestServerCnxn and is used to
* control the amount of time the TestServerCnxn waits
* before allowing an I/O operation.
*/
public class TestHandle {
private Object mu = new Object();
private int ioWaitMillis = 0;
/*
* Set an artificial I/O wait (in milliseconds) on ServerCnxn and
* then sleep for the specified number of milliseconds.
*/
public void setIOWaitMillis(int ioWaitMillis, int sleepMillis) {
synchronized(mu) {
this.ioWaitMillis = ioWaitMillis;
}
if (sleepMillis > 0) {
try {
Thread.sleep(sleepMillis);
} catch (InterruptedException e) {}
}
}
/*
* Get the number of milliseconds to wait before
* allowing ServerCnxn to perform I/O.
*/
public int getIOWaitMillis() {
synchronized(mu) {
return this.ioWaitMillis;
}
}
}
public class TestServerCnxn extends NIOServerCnxn {
public TestHandle testHandle;
public TestServerCnxn(ZooKeeperServer zk, SocketChannel sock, SelectionKey sk, NIOServerCnxnFactory factory, TestHandle testHandle) throws IOException {
super(zk, sock, sk, factory);
this.testHandle = testHandle;
}
public void doIO(SelectionKey k) throws InterruptedException {
final int millis = this.testHandle.getIOWaitMillis();
if (millis > 0) {
LOG.info("imposing a "+millis+" millisecond wait on ServerCxn: "+this);
try {
Thread.sleep(millis);
} catch (InterruptedException e) {}
}
super.doIO(k);
}
}
}
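
A test throttles ZooKeeper I/O through the factory's handle, for example (a sketch; the delay values are illustrative):

    // Sketch: impose a 1s wait on each ServerCnxn I/O operation and sleep 2s
    // so in-flight operations feel the delay, then restore normal I/O.
    connector.testHandle.setIOWaitMillis(1000, 2000);
    // ... exercise the replicated store while ZooKeeper responds slowly ...
    connector.testHandle.setIOWaitMillis(0, 0);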

View File

@ -1,36 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# The logging properties used during tests..
#
log4j.rootLogger=INFO, console, file
#log4j.logger.org.apache.activemq.leveldb=TRACE
#log4j.logger.org.apache.zookeeper=DEBUG
# Console will only display warnings
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d | %-5p | %t | %m%n
log4j.appender.console.threshold=TRACE
# File appender will contain all info messages
log4j.appender.file=org.apache.log4j.FileAppender
log4j.appender.file.layout=org.apache.log4j.PatternLayout
log4j.appender.file.layout.ConversionPattern=%d | %-5p | %m | %c | %t%n
log4j.appender.file.file=target/test.log
log4j.appender.file.append=true

View File

@ -1,358 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.dfs
import org.apache.activemq.leveldb.util._
import org.fusesource.leveldbjni.internal.Util
import FileSupport._
import java.io._
import scala.collection.mutable._
import scala.collection.immutable.TreeMap
import org.fusesource.hawtbuf.Buffer
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.activemq.leveldb.{RecordLog, LevelDBClient}
import scala.Some
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object DFSLevelDBClient extends Log {
val MANIFEST_SUFFIX = ".mf"
val LOG_SUFFIX = LevelDBClient.LOG_SUFFIX
val INDEX_SUFFIX = LevelDBClient.INDEX_SUFFIX
def create_sequence_path(directory:Path, id:Long, suffix:String) = new Path(directory, ("%016x%s".format(id, suffix)))
def find_sequence_status(fs:FileSystem, directory:Path, suffix:String) = {
TreeMap((fs.listStatus(directory).flatMap { f =>
val name = f.getPath.getName
if( name.endsWith(suffix) ) {
try {
val base = name.stripSuffix(suffix)
val position = java.lang.Long.parseLong(base, 16);
Some(position -> f )
} catch {
case e:NumberFormatException => None
}
} else {
None
}
}): _* )
}
}
/**
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DFSLevelDBClient(val store:DFSLevelDBStore) extends LevelDBClient(store) {
import DFSLevelDBClient._
case class Snapshot(current_manifest:String, files:Set[String])
var snapshots = TreeMap[Long, Snapshot]()
// Eventually we will allow warm standby slaves to add references to old
// snapshots so that we don't delete them while they are in the process
// of downloading the snapshot.
var snapshotRefCounters = HashMap[Long, LongCounter]()
var indexFileRefCounters = HashMap[String, LongCounter]()
def dfs = store.dfs
def dfsDirectory = new Path(store.dfsDirectory)
def dfsBlockSize = store.dfsBlockSize
def dfsReplication = store.dfsReplication
def remoteIndexPath = new Path(dfsDirectory, "index")
override def start() = {
might_fail {
directory.mkdirs()
dfs.mkdirs(dfsDirectory)
downloadLogFiles
dfs.mkdirs(remoteIndexPath)
downloadIndexFiles
}
super.start()
storeTrace("Master takeover by: "+store.containerId, true)
}
override def locked_purge = {
super.locked_purge
dfs.delete(dfsDirectory, true)
}
override def snapshotIndex(sync: Boolean) = {
val previous_snapshot = lastIndexSnapshotPos
super.snapshotIndex(sync)
// upload the snapshot to the dfs
uploadIndexFiles(lastIndexSnapshotPos)
// Drop the previous snapshot reference..
for( counter <- snapshotRefCounters.get(previous_snapshot)) {
if( counter.decrementAndGet() <= 0 ) {
snapshotRefCounters.remove(previous_snapshot)
}
}
gcSnapshotRefs
}
// downloads missing log files...
def downloadLogFiles {
val log_files = find_sequence_status(dfs, dfsDirectory, LOG_SUFFIX)
val downloads = log_files.flatMap( _ match {
case (id, status) =>
val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
// is it missing or does the size not match?
if (!target.exists() || target.length() != status.getLen) {
Some((id, status))
} else {
None
}
})
if( !downloads.isEmpty ) {
val total_size = downloads.foldLeft(0L)((a,x)=> a+x._2.getLen)
downloads.foreach {
case (id, status) =>
val target = LevelDBClient.create_sequence_file(directory, id, LOG_SUFFIX)
// is it missing or does the size not match?
if (!target.exists() || target.length() != status.getLen) {
info("Downloading log file: "+status.getPath.getName)
using(dfs.open(status.getPath, 32*1024)) { is=>
using(new FileOutputStream(target)) { os=>
copy(is, os)
}
}
}
}
}
}
// See if there is a more recent index that can be downloaded.
def downloadIndexFiles {
snapshots = TreeMap()
dfs.listStatus(remoteIndexPath).foreach { status =>
val name = status.getPath.getName
indexFileRefCounters.put(name, new LongCounter())
if( name endsWith MANIFEST_SUFFIX ) {
info("Getting index snapshot manifest: "+status.getPath.getName)
val mf = using(dfs.open(status.getPath)) { is =>
JsonCodec.decode(is, classOf[IndexManifestDTO])
}
import collection.JavaConversions._
snapshots += mf.snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
}
}
// Check for invalid snapshots..
for( (snapshotid, snapshot) <- snapshots) {
val matches = indexFileRefCounters.keySet & snapshot.files
if( matches.size != snapshot.files.size ) {
var path = create_sequence_path(remoteIndexPath, snapshotid, MANIFEST_SUFFIX)
warn("Deleting inconsistent snapshot manifest: "+path.getName)
dfs.delete(path, true)
snapshots -= snapshotid
}
}
// Add a ref to the last snapshot..
for( (snapshotid, _) <- snapshots.lastOption ) {
snapshotRefCounters.getOrElseUpdate(snapshotid, new LongCounter()).incrementAndGet()
}
// Increment index file refs..
for( key <- snapshotRefCounters.keys; snapshot <- snapshots.get(key); file <- snapshot.files ) {
indexFileRefCounters.getOrElseUpdate(file, new LongCounter()).incrementAndGet()
}
// Remove unreferenced index files.
for( (name, counter) <- indexFileRefCounters ) {
if( counter.get() <= 0 ) {
var path = new Path(remoteIndexPath, name)
info("Deleting unreferenced index file: "+path.getName)
dfs.delete(path, true)
indexFileRefCounters.remove(name)
}
}
val local_snapshots = Map(LevelDBClient.find_sequence_files(directory, INDEX_SUFFIX).values.flatten { dir =>
if( dir.isDirectory ) dir.listFiles() else Array[File]()
}.map(x=> (x.getName, x)).toSeq:_*)
for( (id, snapshot) <- snapshots.lastOption ) {
// increment the ref..
tempIndexFile.recursiveDelete
tempIndexFile.mkdirs
for( file <- snapshot.files ; if !file.endsWith(MANIFEST_SUFFIX) ) {
val target = tempIndexFile / file
// The file might be in a local snapshot already..
local_snapshots.get(file) match {
case Some(f) =>
// had it locally.. link it.
Util.link(f, target)
case None =>
// download..
var path = new Path(remoteIndexPath, file)
info("Downloading index file: "+path)
using(dfs.open(path, 32*1024)) { is=>
using(new FileOutputStream(target)) { os=>
copy(is, os)
}
}
}
}
val current = tempIndexFile / "CURRENT"
current.writeText(snapshot.current_manifest)
// We got everything ok, now rename.
tempIndexFile.renameTo(LevelDBClient.create_sequence_file(directory, id, INDEX_SUFFIX))
}
gcSnapshotRefs
}
def gcSnapshotRefs = {
snapshots = snapshots.filter { case (id, snapshot)=>
if (snapshotRefCounters.get(id).isDefined) {
true
} else {
for( file <- snapshot.files ) {
for( counter <- indexFileRefCounters.get(file) ) {
if( counter.decrementAndGet() <= 0 ) {
var path = new Path(remoteIndexPath, file)
info("Deleteing unreferenced index file: %s", path.getName)
dfs.delete(path, true)
indexFileRefCounters.remove(file)
}
}
}
false
}
}
}
def uploadIndexFiles(snapshot_id:Long):Unit = {
val source = LevelDBClient.create_sequence_file(directory, snapshot_id, INDEX_SUFFIX)
try {
// Build the new manifest..
val mf = new IndexManifestDTO
mf.snapshot_id = snapshot_id
mf.current_manifest = (source / "CURRENT").readText()
source.listFiles.foreach { file =>
val name = file.getName
if( name !="LOCK" && name !="CURRENT") {
mf.files.add(name)
}
}
import collection.JavaConversions._
mf.files.foreach { file =>
val refs = indexFileRefCounters.getOrElseUpdate(file, new LongCounter())
if(refs.get()==0) {
// Upload if not yet on the remote.
val target = new Path(remoteIndexPath, file)
using(new FileInputStream(source / file)) { is=>
using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
copy(is, os)
}
}
}
refs.incrementAndGet()
}
val target = create_sequence_path(remoteIndexPath, mf.snapshot_id, MANIFEST_SUFFIX)
mf.files.add(target.getName)
indexFileRefCounters.getOrElseUpdate(target.getName, new LongCounter()).incrementAndGet()
using(dfs.create(target, true, 1024*32, dfsReplication.toShort, dfsBlockSize)) { os=>
var outputStream:OutputStream = os.asInstanceOf[OutputStream]
JsonCodec.mapper.writeValue(outputStream, mf)
}
snapshots += snapshot_id -> Snapshot(mf.current_manifest, Set(mf.files.toSeq:_*))
snapshotRefCounters.getOrElseUpdate(snapshot_id, new LongCounter()).incrementAndGet()
} catch {
case e: Exception =>
warn(e, "Could not upload the index: " + e)
}
}
// Override the log appender implementation so that it
// stores the logs on the local and remote file systems.
override def createLog = new RecordLog(directory, LOG_SUFFIX) {
override protected def onDelete(file: File) = {
super.onDelete(file)
// also delete the file on the dfs.
dfs.delete(new Path(dfsDirectory, file.getName), false)
}
override def create_log_appender(position: Long, offset:Long) = {
new LogAppender(next_log(position), position, offset) {
val dfs_path = new Path(dfsDirectory, file.getName)
debug("Opening DFS log file for append: "+dfs_path.getName)
val dfs_os = dfs.create(dfs_path, true, RecordLog.BUFFER_SIZE, dfsReplication.toShort, dfsBlockSize )
debug("Opened")
override def flush = this.synchronized {
if( write_buffer.position() > 0 ) {
var buffer: Buffer = write_buffer.toBuffer
// Write it to DFS..
buffer.writeTo(dfs_os.asInstanceOf[OutputStream]);
// Now write it to the local FS.
val byte_buffer = buffer.toByteBuffer
val pos = append_offset-byte_buffer.remaining
flushed_offset.addAndGet(byte_buffer.remaining)
channel.write(byte_buffer, pos)
if( byte_buffer.hasRemaining ) {
throw new IOException("Short write")
}
write_buffer.reset()
}
}
override def force = {
dfs_os.sync()
}
override def on_close {
super.force
dfs_os.close()
}
}
}
}
}
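
Log and index artifacts are keyed by zero-padded hex position (see create_sequence_path above). For instance, in Java terms (a sketch):

    // Sketch of the sequence-file naming scheme used above:
    // id 255 with the ".log" suffix becomes "00000000000000ff.log".
    String name = String.format("%016x%s", 255L, ".log");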

View File

@ -1,75 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.dfs
import org.apache.hadoop.conf.Configuration
import org.apache.activemq.util.ServiceStopper
import org.apache.hadoop.fs.FileSystem
import scala.beans.BeanProperty
import java.net.InetAddress
import org.apache.activemq.leveldb.LevelDBStore
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DFSLevelDBStore extends LevelDBStore {
@BeanProperty
var dfsUrl:String = _
@BeanProperty
var dfsConfig:String = _
@BeanProperty
var dfsDirectory:String = _
@BeanProperty
var dfsBlockSize = 1024*1024*50L
@BeanProperty
var dfsReplication = 1
@BeanProperty
var containerId:String = _
var dfs:FileSystem = _
override def doStart = {
if(dfs==null) {
Thread.currentThread().setContextClassLoader(getClass.getClassLoader)
val config = new Configuration()
config.set("fs.hdfs.impl.disable.cache", "true")
config.set("fs.file.impl.disable.cache", "true")
Option(dfsConfig).foreach(config.addResource(_))
Option(dfsUrl).foreach(config.set("fs.default.name", _))
dfsUrl = config.get("fs.default.name")
dfs = FileSystem.get(config)
}
if ( containerId==null ) {
containerId = InetAddress.getLocalHost.getHostName
}
super.doStart
}
override def doStop(stopper: ServiceStopper): Unit = {
super.doStop(stopper)
if(dfs!=null){
dfs.close()
}
}
override def createClient = new DFSLevelDBClient(this)
}
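
The @BeanProperty fields make the store configurable as a plain bean; a configuration sketch (the HDFS URL and paths are hypothetical):

    // Sketch: wire the DFS-backed store into a broker. All values illustrative.
    DFSLevelDBStore store = new DFSLevelDBStore();
    store.setDfsUrl("hdfs://namenode:8020");      // hypothetical HDFS endpoint
    store.setDfsDirectory("/activemq/leveldb");   // hypothetical remote path
    store.setDfsReplication(2);
    store.setDirectory(new File("target/activemq-data/leveldb"));
    broker.setPersistenceAdapter(store);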

View File

@ -1,43 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.dfs;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.HashSet;
import java.util.Set;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
@XmlRootElement(name="index_files")
@XmlAccessorType(XmlAccessType.FIELD)
public class IndexManifestDTO {
@XmlAttribute(name = "snapshot_id")
public long snapshot_id;
@XmlAttribute(name = "current_manifest")
public String current_manifest;
@XmlAttribute(name = "file")
public Set<String> files = new HashSet<String>();
}
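
DFSLevelDBClient serializes this DTO through JsonCodec when it uploads a snapshot manifest; roughly (a sketch with illustrative values):

    // Sketch: populate and serialize a manifest the way uploadIndexFiles does.
    IndexManifestDTO mf = new IndexManifestDTO();
    mf.snapshot_id = 42;                      // illustrative
    mf.current_manifest = "MANIFEST-000007";  // illustrative
    mf.files.add("000007.sst");               // illustrative
    JsonCodec.mapper.writeValue(System.out, mf);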

View File

@ -1,44 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.activemq.ActiveMQConnectionFactory
import javax.jms.{Destination, ConnectionFactory}
import org.apache.activemq.command.{ActiveMQTopic, ActiveMQQueue}
/**
* <p>
* ActiveMQ implementation of the JMS Scenario class.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class ActiveMQScenario extends JMSClientScenario {
override protected def factory:ConnectionFactory = {
val rc = new ActiveMQConnectionFactory
rc.setBrokerURL(url)
rc
}
override protected def destination(i:Int):Destination = destination_type match {
case "queue" => new ActiveMQQueue(indexed_destination_name(i))
case "topic" => new ActiveMQTopic(indexed_destination_name(i))
case _ => sys.error("Unsuported destination type: "+destination_type)
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.hadoop.fs.FileUtil
import java.io.File
import java.util.concurrent.TimeUnit
import org.apache.activemq.leveldb.{LevelDBStore}
import org.apache.activemq.leveldb.dfs.DFSLevelDBStore
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DFSLevelDBFastEnqueueTest extends LevelDBFastEnqueueTest {
override def setUp: Unit = {
TestingHDFSServer.start
super.setUp
}
override def tearDown: Unit = {
super.tearDown
TestingHDFSServer.stop
}
override protected def createStore: LevelDBStore = {
var store: DFSLevelDBStore = new DFSLevelDBStore
store.setDirectory(dataDirectory)
store.setDfsDirectory("target/activemq-data/hdfs-leveldb")
return store
}
private def dataDirectory: File = {
return new File("target/activemq-data/leveldb")
}
/**
* On restart we will also delete the local file system store, so that we test restoring from
* HDFS.
*/
override protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = {
stopBroker
FileUtil.fullyDelete(dataDirectory)
TimeUnit.MILLISECONDS.sleep(restartDelay)
startBroker(false, checkpoint)
}
}

View File

@ -1,49 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.activemq.store.PersistenceAdapter
import java.io.File
import org.apache.activemq.leveldb.dfs.DFSLevelDBStore
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class DFSLevelDBStoreTest extends LevelDBStoreTest {
override protected def setUp: Unit = {
TestingHDFSServer.start
super.setUp
}
override protected def tearDown: Unit = {
super.tearDown
TestingHDFSServer.stop
}
override protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = {
var store: DFSLevelDBStore = new DFSLevelDBStore
store.setDirectory(new File("target/activemq-data/haleveldb"))
store.setDfsDirectory("localhost")
if (delete) {
store.deleteAllMessages
}
return store
}
}

View File

@ -1,175 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import junit.framework.TestCase
import org.apache.activemq.broker._
import org.apache.activemq.store._
import java.io.File
import junit.framework.Assert._
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics
import region.policy.{PolicyEntry, PolicyMap}
import org.apache.activemq.leveldb.{LevelDBStore}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class EnqueueRateScenariosTest extends TestCase {
var broker: BrokerService = null
override def setUp() {
import collection.JavaConversions._
broker = new BrokerService
broker.setDeleteAllMessagesOnStartup(true)
broker.setPersistenceAdapter(createStore)
broker.addConnector("tcp://0.0.0.0:0")
// val policies = new PolicyMap();
// val entry = new PolicyEntry
// entry.setQueue(">")
// policies.setPolicyEntries(List(entry))
// broker.setDestinationPolicy(policies)
broker.start
broker.waitUntilStarted()
}
override def tearDown() = {
if (broker != null) {
broker.stop
broker.waitUntilStopped
}
}
protected def canceledEnqueues() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowCanceledCounter
protected def enqueueOptimized() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueDelayReqested
protected def enqueueNotOptimized() =
broker.getPersistenceAdapter.asInstanceOf[LevelDBStore].db.uowEnqueueNodelayReqested
protected def createStore: PersistenceAdapter = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/leveldb"))
return store
}
def collect_benchmark(scenario:ActiveMQScenario, warmup:Int, samples_count:Int) = {
val (cancels, optimized, unoptimized) = scenario.with_load {
println("Warming up for %d seconds...".format(warmup))
Thread.sleep(warmup*1000)
println("Sampling...")
scenario.collection_start
val cancelStart = canceledEnqueues
val enqueueOptimizedStart = enqueueOptimized
val enqueueNotOptimizedStart = enqueueNotOptimized
for (i <- 0 until samples_count) {
Thread.sleep(1000);
scenario.collection_sample
}
(canceledEnqueues-cancelStart, enqueueOptimized-enqueueOptimizedStart, enqueueNotOptimized-enqueueNotOptimizedStart)
}
println("Done.")
var samples = scenario.collection_end
val error_rates = samples.get("e_custom").get.map(_._2)
assertFalse("Errors occured during scenario run: "+error_rates, error_rates.find(_ > 0 ).isDefined )
val producer_stats = new DescriptiveStatistics();
for( producer_rates <- samples.get("p_custom") ) {
for( i <- producer_rates ) {
producer_stats.addValue(i._2)
}
}
val consumer_stats = new DescriptiveStatistics();
for( consumer_rates <- samples.get("c_custom") ) {
for( i <- consumer_rates ) {
consumer_stats.addValue(i._2)
}
}
(producer_stats, consumer_stats, cancels*1.0/samples_count, optimized*1.0/samples_count, unoptimized*1.0/samples_count)
}
def benchmark(name:String, warmup:Int=3, samples_count:Int=15, async_send:Boolean=true)(setup:(ActiveMQScenario)=>Unit) = {
println("Benchmarking: "+name)
var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend="+async_send
val url = broker.getTransportConnectors.get(0).getConnectUri + options
val scenario = new ActiveMQScenario
scenario.url = url
scenario.display_errors = true
scenario.persistent = true
scenario.message_size = 1024 * 3
setup(scenario)
val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = collect_benchmark(scenario, warmup, samples_count)
println("%s: producer avg msg/sec: %,.2f, stddev: %,.2f".format(name, producer_stats.getMean, producer_stats.getStandardDeviation))
println("%s: consumer avg msg/sec: %,.2f, stddev: %,.2f".format(name, consumer_stats.getMean, consumer_stats.getStandardDeviation))
println("%s: canceled enqueues/sec: %,.2f".format(name,cancels))
println("%s: optimized enqueues/sec: %,.2f".format(name,optimized))
println("%s: unoptimized enqueues/sec: %,.2f".format(name,unoptimized))
(producer_stats, consumer_stats, cancels, optimized, unoptimized)
}
def testHighCancelRatio = {
val (producer_stats, consumer_stats, cancels, optimized, unoptimized) = benchmark("both_connected_baseline") { scenario=>
scenario.producers = 1
scenario.consumers = 1
}
val cancel_ratio = cancels / producer_stats.getMean
assertTrue("Expecting more than 80%% of the enqueues get canceled. But only %.2f%% was canceled".format(cancel_ratio*100), cancel_ratio > .80)
}
def testDecoupledProducerRate = {
// Fill up the queue with messages.. for the benefit of the next benchmark..
val from_1_to_0 = benchmark("from_1_to_0", 60) { scenario=>
scenario.producers = 1
scenario.consumers = 0
}
val from_1_to_10 = benchmark("from_1_to_10") { scenario=>
scenario.producers = 1
scenario.consumers = 10
}
val from_1_to_1 = benchmark("from_1_to_1") { scenario=>
scenario.producers = 1
scenario.consumers = 1
}
var percent_diff0 = (1.0 - (from_1_to_0._1.getMean / from_1_to_1._1.getMean)).abs * 100
var percent_diff1 = (1.0 - (from_1_to_1._1.getMean / from_1_to_10._1.getMean)).abs * 100
var msg0 = "The 0 vs 1 consumer scenario producer rate was within %.2f%%".format(percent_diff0)
var msg1 = "The 1 vs 10 consumer scenario producer rate was within %.2f%%".format(percent_diff1)
println(msg0)
println(msg1)
assertTrue(msg0, percent_diff0 <= 60)
assertTrue(msg1, percent_diff1 <= 20)
}
}

View File

@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.LevelDBStore;
import java.io.File;
public class IDERunner {
public static void main(String[]args) throws Exception {
BrokerService bs = new BrokerService();
bs.addConnector("tcp://localhost:61616");
LevelDBStore store = new LevelDBStore();
store.setDirectory(new File("target/activemq-data/haleveldb"));
bs.setPersistenceAdapter(store);
bs.deleteAllMessages();
bs.start();
bs.waitUntilStopped();
}
}
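
With the LevelDB store gone, the same IDE runner boots against KahaDB instead; a sketch under that assumption (the data directory is illustrative):

    import java.io.File;
    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;

    public class IDERunner {
        public static void main(String[] args) throws Exception {
            BrokerService bs = new BrokerService();
            bs.addConnector("tcp://localhost:61616");
            KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
            store.setDirectory(new File("target/activemq-data/kahadb")); // illustrative
            bs.setPersistenceAdapter(store);
            bs.deleteAllMessages();
            bs.start();
            bs.waitUntilStopped();
        }
    }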

View File

@ -1,204 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import java.lang.Thread
import javax.jms._
/**
* <p>
* Simulates load on a JMS sever using the JMS messaging API.
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
abstract class JMSClientScenario extends Scenario {
def createProducer(i:Int) = {
new ProducerClient(i)
}
def createConsumer(i:Int) = {
new ConsumerClient(i)
}
protected def destination(i:Int):Destination
def indexed_destination_name(i:Int) = destination_type match {
case "queue" => queue_prefix+destination_name+"-"+(i%destination_count)
case "topic" => topic_prefix+destination_name+"-"+(i%destination_count)
case _ => sys.error("Unsuported destination type: "+destination_type)
}
protected def factory:ConnectionFactory
def jms_ack_mode = {
ack_mode match {
case "auto" => Session.AUTO_ACKNOWLEDGE
case "client" => Session.CLIENT_ACKNOWLEDGE
case "dups_ok" => Session.DUPS_OK_ACKNOWLEDGE
case "transacted" => Session.SESSION_TRANSACTED
case _ => throw new Exception("Invalid ack mode: "+ack_mode)
}
}
trait JMSClient extends Client {
@volatile
var connection:Connection = _
var message_counter=0L
var worker = new Thread() {
override def run() {
var reconnect_delay = 0
while( !done.get ) {
try {
if( reconnect_delay!=0 ) {
Thread.sleep(reconnect_delay)
reconnect_delay=0
}
connection = factory.createConnection(user_name, password)
// connection.setClientID(name)
connection.setExceptionListener(new ExceptionListener {
def onException(exception: JMSException) {
}
})
connection.start()
execute
} catch {
case e:Throwable =>
if( !done.get ) {
if( display_errors ) {
e.printStackTrace
}
error_counter.incrementAndGet
reconnect_delay = 1000
}
} finally {
dispose
}
}
}
}
def dispose {
try {
connection.close()
} catch {
case _:Throwable =>
}
}
def execute:Unit
def start = {
worker.start
}
def shutdown = {
assert(done.get)
if ( worker!=null ) {
dispose
worker.join(1000)
while(worker.isAlive ) {
println("Worker did not shutdown quickly.. interrupting thread.")
worker.interrupt()
worker.join(1000)
}
worker = null
}
}
def name:String
}
class ConsumerClient(val id: Int) extends JMSClient {
val name: String = "consumer " + id
def execute {
var session = connection.createSession(false, jms_ack_mode)
var consumer:MessageConsumer = if( durable ) {
session.createDurableSubscriber(destination(id).asInstanceOf[Topic], name, selector, no_local)
} else {
session.createConsumer(destination(id), selector, no_local)
}
while( !done.get() ) {
val msg = consumer.receive(500)
if( msg!=null ) {
consumer_counter.incrementAndGet()
if (consumer_sleep != 0) {
Thread.sleep(consumer_sleep)
}
if(session.getAcknowledgeMode == Session.CLIENT_ACKNOWLEDGE) {
msg.acknowledge();
}
}
}
}
}
class ProducerClient(val id: Int) extends JMSClient {
val name: String = "producer " + id
def execute {
val session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
val producer:MessageProducer = session.createProducer(destination(id))
producer.setDeliveryMode(if( persistent ) {
DeliveryMode.PERSISTENT
} else {
DeliveryMode.NON_PERSISTENT
})
val msg = session.createTextMessage(body(name))
headers_for(id).foreach { case (key, value) =>
msg.setStringProperty(key, value)
}
while( !done.get() ) {
producer.send(msg)
producer_counter.incrementAndGet()
if (producer_sleep != 0) {
Thread.sleep(producer_sleep)
}
}
}
}
def body(name:String) = {
val buffer = new StringBuffer(message_size)
buffer.append("Message from " + name+"\n")
for( i <- buffer.length to message_size ) {
buffer.append(('a'+(i%26)).toChar)
}
var rc = buffer.toString
if( rc.length > message_size ) {
rc.substring(0, message_size)
} else {
rc
}
}
}

View File

@ -1,209 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.activemq.ActiveMQConnection
import org.apache.activemq.ActiveMQConnectionFactory
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.command.ActiveMQQueue
import org.apache.activemq.command.ConnectionControl
import org.junit.After
import org.junit.Before
import org.junit.Test
import javax.jms._
import java.io.File
import java.util.Vector
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import junit.framework.Assert._
import org.apache.activemq.leveldb.util.Log
import junit.framework.TestCase
import org.apache.activemq.leveldb.LevelDBStore
object LevelDBFastEnqueueTest extends Log
class LevelDBFastEnqueueTest extends TestCase {
import LevelDBFastEnqueueTest._
@Test def testPublishNoConsumer: Unit = {
startBroker(true, 10)
val sharedCount: AtomicLong = new AtomicLong(toSend)
var start: Long = System.currentTimeMillis
var executorService: ExecutorService = Executors.newCachedThreadPool
var i: Int = 0
while (i < parallelProducer) {
executorService.execute(new Runnable {
def run: Unit = {
try {
publishMessages(sharedCount, 0)
}
catch {
case e: Exception => {
exceptions.add(e)
}
}
}
})
i += 1
}
executorService.shutdown
executorService.awaitTermination(30, TimeUnit.MINUTES)
assertTrue("Producers done in time", executorService.isTerminated)
assertTrue("No exceptions: " + exceptions, exceptions.isEmpty)
var totalSent: Long = toSend * payloadString.length
var duration: Double = System.currentTimeMillis - start
info("Duration: " + duration + "ms")
info("Rate: " + (toSend * 1000 / duration) + "m/s")
info("Total send: " + totalSent)
info("Total journal write: " + store.getLogAppendPosition)
info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%")
stopBroker
restartBroker(0, 1200000)
consumeMessages(toSend)
}
@Test def testPublishNoConsumerNoCheckpoint: Unit = {
toSend = 100
startBroker(true, 0)
val sharedCount: AtomicLong = new AtomicLong(toSend)
var start: Long = System.currentTimeMillis
var executorService: ExecutorService = Executors.newCachedThreadPool
var i: Int = 0
while (i < parallelProducer) {
executorService.execute(new Runnable {
def run: Unit = {
try {
publishMessages(sharedCount, 0)
}
catch {
case e: Exception => {
exceptions.add(e)
}
}
}
})
i += 1;
}
executorService.shutdown
executorService.awaitTermination(30, TimeUnit.MINUTES)
assertTrue("Producers done in time", executorService.isTerminated)
assertTrue("No exceptions: " + exceptions, exceptions.isEmpty)
var totalSent: Long = toSend * payloadString.length
broker.getAdminView.gc
var duration: Double = System.currentTimeMillis - start
info("Duration: " + duration + "ms")
info("Rate: " + (toSend * 1000 / duration) + "m/s")
info("Total send: " + totalSent)
info("Total journal write: " + store.getLogAppendPosition)
info("Journal writes %: " + store.getLogAppendPosition / totalSent.asInstanceOf[Double] * 100 + "%")
stopBroker
restartBroker(0, 0)
consumeMessages(toSend)
}
private def consumeMessages(count: Long): Unit = {
var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection]
connection.setWatchTopicAdvisories(false)
connection.start
var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
var consumer: MessageConsumer = session.createConsumer(destination)
var i: Int = 0
while (i < count) {
assertNotNull("got message " + i, consumer.receive(10000))
i += 1;
}
assertNull("none left over", consumer.receive(2000))
}
protected def restartBroker(restartDelay: Int, checkpoint: Int): Unit = {
stopBroker
TimeUnit.MILLISECONDS.sleep(restartDelay)
startBroker(false, checkpoint)
}
override def tearDown() = stopBroker
def stopBroker: Unit = {
if (broker != null) {
broker.stop
broker.waitUntilStopped
}
}
private def publishMessages(count: AtomicLong, expiry: Int): Unit = {
var connection: ActiveMQConnection = connectionFactory.createConnection.asInstanceOf[ActiveMQConnection]
connection.setWatchTopicAdvisories(false)
connection.start
var session: Session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
var producer: MessageProducer = session.createProducer(destination)
var start: Long = System.currentTimeMillis
var i: Long = 0L
var bytes: Array[Byte] = payloadString.getBytes
while ((({
i = count.getAndDecrement; i
})) > 0) {
var message: Message = null
if (useBytesMessage) {
message = session.createBytesMessage
(message.asInstanceOf[BytesMessage]).writeBytes(bytes)
}
else {
message = session.createTextMessage(payloadString)
}
producer.send(message, DeliveryMode.PERSISTENT, 5, expiry)
if (i != toSend && i % sampleRate == 0) {
var now: Long = System.currentTimeMillis
info("Remainder: " + i + ", rate: " + sampleRate * 1000 / (now - start) + "m/s")
start = now
}
}
connection.syncSendPacket(new ConnectionControl)
connection.close
}
def startBroker(deleteAllMessages: Boolean, checkPointPeriod: Int): Unit = {
broker = new BrokerService
broker.setDeleteAllMessagesOnStartup(deleteAllMessages)
store = createStore
broker.setPersistenceAdapter(store)
broker.addConnector("tcp://0.0.0.0:0")
broker.start
var options: String = "?jms.watchTopicAdvisories=false&jms.useAsyncSend=true&jms.alwaysSessionAsync=false&jms.dispatchAsync=false&socketBufferSize=131072&ioBufferSize=16384&wireFormat.tightEncodingEnabled=false&wireFormat.cacheSize=8192"
connectionFactory = new ActiveMQConnectionFactory(broker.getTransportConnectors.get(0).getConnectUri + options)
}
protected def createStore: LevelDBStore = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/leveldb"))
return store
}
private[leveldb] var broker: BrokerService = null
private[leveldb] var connectionFactory: ActiveMQConnectionFactory = null
private[leveldb] var store: LevelDBStore = null
private[leveldb] var destination: Destination = new ActiveMQQueue("Test")
private[leveldb] var payloadString: String = new String(new Array[Byte](6 * 1024))
private[leveldb] var useBytesMessage: Boolean = true
private[leveldb] final val parallelProducer: Int = 20
private[leveldb] var exceptions: Vector[Exception] = new Vector[Exception]
private[leveldb] var toSend: Long = 100000
private[leveldb] final val sampleRate: Double = 100000
}
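
publishMessages drains a shared AtomicLong so the 20 parallel producers split one message budget; the loop reads more naturally in Java (a sketch of the same logic):

    // Sketch of the shared-count publish loop above: each producer claims
    // message slots until the shared counter runs out.
    long i;
    while ((i = count.getAndDecrement()) > 0) {
        producer.send(message, DeliveryMode.PERSISTENT, 5, expiry);
    }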

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.store.PListTestSupport;
/**
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
public class LevelDBPlistTest extends PListTestSupport {
@Override
protected LevelDBStore createPListStore() {
return new LevelDBStore();
}
protected LevelDBStore createConcurrentAddIteratePListStore() {
return new LevelDBStore();
}
@Override
protected LevelDBStore createConcurrentAddRemovePListStore() {
return new LevelDBStore();
}
@Override
protected LevelDBStore createConcurrentAddRemoveWithPreloadPListStore() {
return new LevelDBStore();
}
@Override
protected LevelDBStore createConcurrentAddIterateRemovePListStore(boolean enablePageCache) {
return new LevelDBStore();
}
}

View File

@ -1,42 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.activemq.store.PersistenceAdapter
import org.apache.activemq.store.PersistenceAdapterTestSupport
import java.io.File
import org.apache.activemq.leveldb.LevelDBStore
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class LevelDBStoreTest extends PersistenceAdapterTestSupport {
override def testStoreCanHandleDupMessages: Unit = {
}
protected def createPersistenceAdapter(delete: Boolean): PersistenceAdapter = {
var store: LevelDBStore = new LevelDBStore
store.setDirectory(new File("target/activemq-data/haleveldb"))
if (delete) {
store.deleteAllMessages
}
return store
}
}

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.region.policy.PolicyEntry;
import org.apache.activemq.broker.region.policy.PolicyMap;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.tools.ant.util.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import javax.jms.Connection;
import javax.jms.DeliveryMode;
import javax.jms.MessageProducer;
import javax.jms.Session;
import java.io.File;
public class PListTest {
protected BrokerService brokerService;
@Before
public void setUp() throws Exception {
brokerService = new BrokerService();
brokerService.addConnector("tcp://localhost:0");
LevelDBStore store = new LevelDBStore();
store.setDirectory(new File("target/activemq-data/haleveldb"));
store.deleteAllMessages();
brokerService.setPersistenceAdapter(store);
PolicyMap policyMap = new PolicyMap();
PolicyEntry policy = new PolicyEntry();
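// A 1-byte memory limit forces non-persistent messages to spool to the temp (PList) store, so stopping the broker exercises PList cleanup.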
policy.setMemoryLimit(1);
policyMap.setDefaultEntry(policy);
brokerService.setDestinationPolicy(policyMap);
brokerService.start();
}
@After
public void tearDown() throws Exception {
if (brokerService != null && !brokerService.isStopped()) {
brokerService.stop();
}
FileUtils.delete(new File("target/activemq-data/haleveldb"));
}
@Test
public void testBrokerStop() throws Exception {
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(brokerService.getTransportConnectors().get(0).getServer().getConnectURI().toString());
Connection conn = factory.createConnection();
Session sess = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
MessageProducer producer = sess.createProducer(sess.createQueue("TEST"));
producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT);
for (int i = 0; i < 10000; i++) {
producer.send(sess.createTextMessage(i + " message"));
}
brokerService.stop();
brokerService.waitUntilStopped();
}
}

View File

@ -1,331 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import java.util.concurrent.atomic._
import java.util.concurrent.TimeUnit._
import scala.collection.mutable.ListBuffer
object Scenario {
val MESSAGE_ID:Array[Byte] = "message-id"
val NEWLINE = '\n'.toByte
val NANOS_PER_SECOND = NANOSECONDS.convert(1, SECONDS)
implicit def toBytes(value: String):Array[Byte] = value.getBytes("UTF-8")
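// o() lifts a possibly-null reference into an Option, since these helpers interoperate with null-returning Java APIs.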
def o[T](value:T):Option[T] = value match {
case null => None
case x => Some(x)
}
}
trait Scenario {
import Scenario._
var url:String = "tcp://localhost:61616"
var user_name:String = _
var password:String = _
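// The sleep knobs are structural types so a scenario can supply either a fixed sleep in ms or a time-varying function with an init(startTime) hook; the overloaded setters below accept both forms.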
private var _producer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} }
def producer_sleep = _producer_sleep()
def producer_sleep_= (new_value: Int) = _producer_sleep = new { def apply() = new_value; def init(time: Long) {} }
def producer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _producer_sleep = new_func
private var _consumer_sleep: { def apply(): Int; def init(time: Long) } = new { def apply() = 0; def init(time: Long) {} }
def consumer_sleep = _consumer_sleep()
def consumer_sleep_= (new_value: Int) = _consumer_sleep = new { def apply() = new_value; def init(time: Long) {} }
def consumer_sleep_= (new_func: { def apply(): Int; def init(time: Long) }) = _consumer_sleep = new_func
var producers = 1
var producers_per_sample = 0
var consumers = 1
var consumers_per_sample = 0
var sample_interval = 1000
var message_size = 1024
var persistent = false
var headers = Array[Array[(String,String)]]()
var selector:String = null
var no_local = false
var durable = false
var ack_mode = "auto"
var messages_per_connection = -1L
var display_errors = false
var destination_type = "queue"
private var _destination_name: () => String = () => "load"
def destination_name = _destination_name()
def destination_name_=(new_name: String) = _destination_name = () => new_name
def destination_name_=(new_func: () => String) = _destination_name = new_func
var destination_count = 1
val producer_counter = new AtomicLong()
val consumer_counter = new AtomicLong()
val error_counter = new AtomicLong()
val done = new AtomicBoolean()
var queue_prefix = ""
var topic_prefix = ""
var name = "custom"
var drain_timeout = 2000L
def run() = {
print(toString)
println("--------------------------------------")
println(" Running: Press ENTER to stop")
println("--------------------------------------")
println("")
with_load {
// start a sampling client...
val sample_thread = new Thread() {
override def run() = {
def print_rate(name: String, periodCount:Long, totalCount:Long, nanos: Long) = {
val rate_per_second: java.lang.Float = ((1.0f * periodCount / nanos) * NANOS_PER_SECOND)
println("%s total: %,d, rate: %,.3f per second".format(name, totalCount, rate_per_second))
}
try {
var start = System.nanoTime
var total_producer_count = 0L
var total_consumer_count = 0L
var total_error_count = 0L
collection_start
while( !done.get ) {
Thread.sleep(sample_interval)
val end = System.nanoTime
collection_sample
val samples = collection_end
samples.get("p_custom").foreach { case (_, count)::Nil =>
total_producer_count += count
print_rate("Producer", count, total_producer_count, end - start)
case _ =>
}
samples.get("c_custom").foreach { case (_, count)::Nil =>
total_consumer_count += count
print_rate("Consumer", count, total_consumer_count, end - start)
case _ =>
}
samples.get("e_custom").foreach { case (_, count)::Nil =>
if( count != 0 ) {
total_error_count += count
print_rate("Error", count, total_error_count, end - start)
}
case _ =>
}
start = end
}
} catch {
case e:InterruptedException =>
}
}
}
sample_thread.start()
System.in.read()
done.set(true)
sample_thread.interrupt
sample_thread.join
}
}
override def toString() = {
"--------------------------------------\n"+
"Scenario Settings\n"+
"--------------------------------------\n"+
" destination_type = "+destination_type+"\n"+
" queue_prefix = "+queue_prefix+"\n"+
" topic_prefix = "+topic_prefix+"\n"+
" destination_count = "+destination_count+"\n" +
" destination_name = "+destination_name+"\n" +
" sample_interval (ms) = "+sample_interval+"\n" +
" \n"+
" --- Producer Properties ---\n"+
" producers = "+producers+"\n"+
" message_size = "+message_size+"\n"+
" persistent = "+persistent+"\n"+
" producer_sleep (ms) = "+producer_sleep+"\n"+
" headers = "+headers.mkString(", ")+"\n"+
" \n"+
" --- Consumer Properties ---\n"+
" consumers = "+consumers+"\n"+
" consumer_sleep (ms) = "+consumer_sleep+"\n"+
" selector = "+selector+"\n"+
" durable = "+durable+"\n"+
""
}
protected def headers_for(i:Int) = {
if ( headers.isEmpty ) {
Array[(String, String)]()
} else {
headers(i%headers.size)
}
}
var producer_samples:Option[ListBuffer[(Long,Long)]] = None
var consumer_samples:Option[ListBuffer[(Long,Long)]] = None
var error_samples = ListBuffer[(Long,Long)]()
def collection_start: Unit = {
producer_counter.set(0)
consumer_counter.set(0)
error_counter.set(0)
producer_samples = if (producers > 0 || producers_per_sample>0 ) {
Some(ListBuffer[(Long,Long)]())
} else {
None
}
consumer_samples = if (consumers > 0 || consumers_per_sample>0 ) {
Some(ListBuffer[(Long,Long)]())
} else {
None
}
}
def collection_end: Map[String, scala.List[(Long,Long)]] = {
var rc = Map[String, List[(Long,Long)]]()
producer_samples.foreach{ samples =>
rc += "p_"+name -> samples.toList
samples.clear
}
consumer_samples.foreach{ samples =>
rc += "c_"+name -> samples.toList
samples.clear
}
rc += "e_"+name -> error_samples.toList
error_samples.clear
rc
}
trait Client {
def start():Unit
def shutdown():Unit
}
var producer_clients = List[Client]()
var consumer_clients = List[Client]()
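// with_load starts the configured producers and consumers, runs the given block, then flags done and shuts all clients down.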
def with_load[T](func: =>T ):T = {
done.set(false)
_producer_sleep.init(System.currentTimeMillis())
_consumer_sleep.init(System.currentTimeMillis())
for (i <- 0 until producers) {
val client = createProducer(i)
producer_clients ::= client
client.start()
}
for (i <- 0 until consumers) {
val client = createConsumer(i)
consumer_clients ::= client
client.start()
}
try {
func
} finally {
done.set(true)
// wait for the threads to finish..
for( client <- consumer_clients ) {
client.shutdown
}
consumer_clients = List()
for( client <- producer_clients ) {
client.shutdown
}
producer_clients = List()
}
}
def drain = {
done.set(false)
if (destination_type == "queue" || destination_type == "raw_queue" || durable) {
print("draining")
consumer_counter.set(0)
var consumer_clients = List[Client]()
for (i <- 0 until destination_count) {
val client = createConsumer(i)
consumer_clients ::= client
client.start()
}
// Keep sleeping until we stop draining messages.
var drained = 0L
try {
Thread.sleep(drain_timeout);
def done() = {
val c = consumer_counter.getAndSet(0)
drained += c
c == 0
}
while( !done ) {
print(".")
Thread.sleep(drain_timeout);
}
} finally {
done.set(true)
for( client <- consumer_clients ) {
client.shutdown
}
println(". (drained %d)".format(drained))
}
}
}
def collection_sample: Unit = {
val now = System.currentTimeMillis()
producer_samples.foreach(_.append((now, producer_counter.getAndSet(0))))
consumer_samples.foreach(_.append((now, consumer_counter.getAndSet(0))))
error_samples.append((now, error_counter.getAndSet(0)))
// we may need to increase the number of producers..
for (i <- 0 until producers_per_sample) {
val client = createProducer(producer_clients.length)
producer_clients ::= client
client.start()
}
// we may need to increase the number of consumers..
for (i <- 0 until consumers_per_sample) {
val client = createConsumer(consumer_clients.length)
consumer_clients ::= client
client.start()
}
}
def createProducer(i:Int):Client
def createConsumer(i:Int):Client
}

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.leveldb.test
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hdfs.MiniDFSCluster
import java.io.IOException
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object TestingHDFSServer {
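// Boots an in-process Hadoop MiniDFSCluster so tests that need an HDFS filesystem can run without an external cluster.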
private[leveldb] def start: Unit = {
var conf: Configuration = new Configuration
cluster = new MiniDFSCluster(conf, 1, true, null)
cluster.waitActive
fs = cluster.getFileSystem
}
private[leveldb] def stop: Unit = {
try {
cluster.shutdown
}
catch {
case e: Throwable => {
e.printStackTrace
}
}
}
private[leveldb] var cluster: MiniDFSCluster = null
private[leveldb] var fs: FileSystem = null
}

View File

@ -60,13 +60,11 @@
org.jasypt*;resolution:=optional, org.jasypt*;resolution:=optional,
org.eclipse.jetty*;resolution:=optional;version="[9.0,10)", org.eclipse.jetty*;resolution:=optional;version="[9.0,10)",
org.apache.zookeeper*;resolution:=optional, org.apache.zookeeper*;resolution:=optional,
org.fusesource.leveldbjni*;resolution:=optional,
org.fusesource.hawtjni*;resolution:=optional, org.fusesource.hawtjni*;resolution:=optional,
org.springframework.jms*;version="[4,5)";resolution:=optional, org.springframework.jms*;version="[4,5)";resolution:=optional,
org.springframework.transaction*;version="[4,5)";resolution:=optional, org.springframework.transaction*;version="[4,5)";resolution:=optional,
org.springframework*;version="[4,5)";resolution:=optional, org.springframework*;version="[4,5)";resolution:=optional,
org.xmlpull*;resolution:=optional, org.xmlpull*;resolution:=optional,
scala*;resolution:=optional,
javax.annotation*;version="[1,4)", javax.annotation*;version="[1,4)",
!com.thoughtworks.qdox*, !com.thoughtworks.qdox*,
org.apache.commons.logging;version="[1.2,2)";resolution:=optional, org.apache.commons.logging;version="[1.2,2)";resolution:=optional,
@ -125,11 +123,6 @@
<groupId>${project.groupId}</groupId> <groupId>${project.groupId}</groupId>
<artifactId>activemq-jdbc-store</artifactId> <artifactId>activemq-jdbc-store</artifactId>
</dependency> </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activemq-leveldb-store</artifactId>
<version>${project.version}</version>
</dependency>
<!-- Additional protocol impls --> <!-- Additional protocol impls -->
<dependency> <dependency>
@ -284,28 +277,8 @@
*;groupId=org.apache.activemq;inline=META-INF/services/*, *;groupId=org.apache.activemq;inline=META-INF/services/*,
*;groupId=org.apache.qpid;inline=META-INF/services/*, *;groupId=org.apache.qpid;inline=META-INF/services/*,
*;groupId=org.apache.xbean;inline=true *;groupId=org.apache.xbean;inline=true
<!--
groupId=org.fusesource.leveldbjni;inline=META-INF/native/*,
groupId=org.xerial.snappy;inline=org/xerial/snappy/*
-->
</Embed-Dependency> </Embed-Dependency>
<Embed-Transitive>true</Embed-Transitive> <Embed-Transitive>true</Embed-Transitive>
<!--
<Bundle-NativeCode>
META-INF/native/windows32/leveldbjni.dll;osname=Win32;processor=x86,
META-INF/native/windows64/leveldbjni.dll;osname=Win32;processor=x86-64,
META-INF/native/osx/libleveldbjni.jnilib;osname=macosx,
META-INF/native/linux32/libleveldbjni.so;osname=Linux;processor=x86,
META-INF/native/linux64/libleveldbjni.so;osname=Linux;processor=x86-64,
org/xerial/snappy/native/Windows/amd64/snappyjava.dll;osname=win32;processor=x86-64,
org/xerial/snappy/native/Windows/x86/snappyjava.dll;osname=win32;processor=x86,
org/xerial/snappy/native/Mac/x86_64/libsnappyjava.jnilib;osname=macosx;processor=x86-64,
org/xerial/snappy/native/Linux/amd64/libsnappyjava.so;osname=linux;processor=x86-64,
org/xerial/snappy/native/Linux/i386/libsnappyjava.so;osname=linux;processor=x86,
org/xerial/snappy/native/Linux/arm/libsnappyjava.so;osname=linux;processor=arm,
*
</Bundle-NativeCode>
-->
</instructions> </instructions>
</configuration> </configuration>
</plugin> </plugin>
@ -460,13 +433,6 @@
<classifier>sources</classifier> <classifier>sources</classifier>
<optional>true</optional> <optional>true</optional>
</dependency> </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activemq-leveldb-store</artifactId>
<version>${project.version}</version>
<classifier>sources</classifier>
<optional>true</optional>
</dependency>
<dependency> <dependency>
<groupId>org.apache.activemq.protobuf</groupId> <groupId>org.apache.activemq.protobuf</groupId>
<artifactId>activemq-protobuf</artifactId> <artifactId>activemq-protobuf</artifactId>

View File

@ -45,10 +45,6 @@
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-leveldb-store</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.linkedin</groupId> <groupId>org.linkedin</groupId>
<artifactId>org.linkedin.zookeeper-impl</artifactId> <artifactId>org.linkedin.zookeeper-impl</artifactId>

View File

@ -14,7 +14,19 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.activemq.leveldb.replicated.groups; package org.apache.activemq.partition;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.data.Stat;
import org.linkedin.util.clock.Clock;
import org.linkedin.util.clock.SystemClock;
import org.linkedin.util.clock.Timespan;
import org.linkedin.util.concurrent.ConcurrentUtils;
import org.linkedin.util.io.PathUtils;
import org.linkedin.zookeeper.client.*;
import org.slf4j.Logger;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
import java.lang.reflect.Field; import java.lang.reflect.Field;
@ -26,43 +38,19 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.zookeeper.CreateMode; public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient implements Watcher {
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.data.Stat;
import org.linkedin.util.clock.Clock;
import org.linkedin.util.clock.SystemClock;
import org.linkedin.util.clock.Timespan;
import org.linkedin.util.concurrent.ConcurrentUtils;
import org.linkedin.util.io.PathUtils;
import org.linkedin.zookeeper.client.ChrootedZKClient;
import org.linkedin.zookeeper.client.IZooKeeper;
import org.linkedin.zookeeper.client.IZooKeeperFactory;
import org.linkedin.zookeeper.client.LifecycleListener;
import org.linkedin.zookeeper.client.ZooKeeperFactory;
import org.osgi.framework.InvalidSyntaxException;
import org.osgi.service.cm.ConfigurationException;
import org.slf4j.Logger;
public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient implements Watcher { private static final Logger LOG = org.slf4j.LoggerFactory.getLogger(ZKClient.class);
private static final Logger LOG = org.slf4j.LoggerFactory.getLogger(ZKClient.class.getName());
private Map<String, String> acls; private Map<String, String> acls;
private String password; private String password;
public void start() throws Exception { public void start() throws Exception {
// Grab the lock to make sure that the registration of the ManagedService // Grab the lock to make sure that the registration of the ManagedService
// won't be updated immediately but that the initial update will happen first // won't be updated immediately but that the initial update will happen first
synchronized (_lock) { synchronized (_lock) {
_stateChangeDispatcher.setDaemon(true); _stateChangeDispatcher.setDaemon(true);
_stateChangeDispatcher.start(); _stateChangeDispatcher.start();
doStart(); doStart();
} }
} }
@ -75,7 +63,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
this.password = password; this.password = password;
} }
protected void doStart() throws InvalidSyntaxException, ConfigurationException, UnsupportedEncodingException { protected void doStart() throws UnsupportedEncodingException {
connect(); connect();
} }
@ -85,7 +73,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
_stateChangeDispatcher.end(); _stateChangeDispatcher.end();
try { try {
_stateChangeDispatcher.join(1000); _stateChangeDispatcher.join(1000);
} catch(Exception e) { } catch (Exception e) {
LOG.debug("ignored exception", e); LOG.debug("ignored exception", e);
} }
} }
@ -94,19 +82,12 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
try { try {
changeState(State.NONE); changeState(State.NONE);
_zk.close(); _zk.close();
// We try to avoid a NPE when shutting down fabric:
// java.lang.NullPointerException
// at org.apache.felix.framework.BundleWiringImpl.findClassOrResourceByDelegation(BundleWiringImpl.java:1433)
// at org.apache.felix.framework.BundleWiringImpl.access$400(BundleWiringImpl.java:73)
// at org.apache.felix.framework.BundleWiringImpl$BundleClassLoader.loadClass(BundleWiringImpl.java:1844)
// at java.lang.ClassLoader.loadClass(ClassLoader.java:247)
// at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1089)
Thread th = getSendThread(); Thread th = getSendThread();
if (th != null) { if (th != null) {
th.join(1000); th.join(1000);
} }
_zk = null; _zk = null;
} catch(Exception e) { } catch (Exception e) {
LOG.debug("ignored exception", e); LOG.debug("ignored exception", e);
} }
} }
@ -154,7 +135,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
public void testGenerateConnectionLoss() throws Exception { public void testGenerateConnectionLoss() throws Exception {
waitForConnected(); waitForConnected();
Object clientCnxnSocket = getField(_zk, "_zk", "cnxn", "sendThread", "clientCnxnSocket"); Object clientCnxnSocket = getField(_zk, "_zk", "cnxn", "sendThread", "clientCnxnSocket");
callMethod(clientCnxnSocket, "testableCloseSocket"); callMethod(clientCnxnSocket, "testableCloseSocket");
} }
@ -200,18 +181,15 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
if (event.getState() != null) { if (event.getState() != null) {
LOG.debug("event: {}", event.getState()); LOG.debug("event: {}", event.getState());
synchronized (_lock) { synchronized (_lock) {
switch(event.getState()) switch(event.getState()) {
{
case SyncConnected: case SyncConnected:
changeState(State.CONNECTED); changeState(State.CONNECTED);
break; break;
case Disconnected: case Disconnected:
if(_state != State.NONE) { if (_state != State.NONE) {
changeState(State.RECONNECTING); changeState(State.RECONNECTING);
} }
break; break;
case Expired: case Expired:
// when expired, the zookeeper object is invalid and we need to recreate a new one // when expired, the zookeeper object is invalid and we need to recreate a new one
_zk = null; _zk = null;
@ -219,7 +197,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
tryConnect(); tryConnect();
break; break;
default: default:
LOG.warn("unprocessed event state: {}", event.getState()); LOG.warn("Unsupported event state: {}", event.getState());
} }
} }
} }
@ -230,7 +208,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
State state = _state; State state = _state;
if (state == State.NONE) { if (state == State.NONE) {
throw new IllegalStateException("ZooKeeper client has not been configured yet. You need to either create an ensemble or join one."); throw new IllegalStateException("ZooKeeper client has not been configured yet. You need to either create an ensemble or join one.");
} else if (state != State.CONNECTED) { } else if (state != State.CONNECTING) {
try { try {
waitForConnected(); waitForConnected();
} catch (Exception e) { } catch (Exception e) {
@ -270,7 +248,6 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
} }
if (!_listeners.contains(listener)) { if (!_listeners.contains(listener)) {
_listeners.add(listener); _listeners.add(listener);
} }
if (_state == State.CONNECTED) { if (_state == State.CONNECTED) {
listener.onConnected(); listener.onConnected();
@ -315,7 +292,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
private final static String CHARSET = "UTF-8"; private final static String CHARSET = "UTF-8";
private final Clock _clock = SystemClock.instance(); private final Clock _clock = SystemClock.instance();
private final List<LifecycleListener> _listeners = new CopyOnWriteArrayList<LifecycleListener>(); private final List<LifecycleListener> _listeners = new CopyOnWriteArrayList<>();
protected final Object _lock = new Object(); protected final Object _lock = new Object();
protected volatile State _state = State.NONE; protected volatile State _state = State.NONE;
@ -331,7 +308,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
private class StateChangeDispatcher extends Thread { private class StateChangeDispatcher extends Thread {
private final AtomicBoolean _running = new AtomicBoolean(true); private final AtomicBoolean _running = new AtomicBoolean(true);
private final BlockingQueue<Boolean> _events = new LinkedBlockingQueue<Boolean>(); private final BlockingQueue<Boolean> _events = new LinkedBlockingQueue<>();
private StateChangeDispatcher() { private StateChangeDispatcher() {
super("ZooKeeper state change dispatcher thread"); super("ZooKeeper state change dispatcher thread");
@ -339,7 +316,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
@Override @Override
public void run() { public void run() {
Map<Object, Boolean> history = new IdentityHashMap<Object, Boolean>(); Map<Object, Boolean> history = new IdentityHashMap<>();
LOG.info("Starting StateChangeDispatcher"); LOG.info("Starting StateChangeDispatcher");
while (_running.get()) { while (_running.get()) {
Boolean isConnectedEvent; Boolean isConnectedEvent;
@ -375,7 +352,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
} }
protected Map<Object, Boolean> callListeners(Map<Object, Boolean> history, Boolean connectedEvent) { protected Map<Object, Boolean> callListeners(Map<Object, Boolean> history, Boolean connectedEvent) {
Map<Object, Boolean> newHistory = new IdentityHashMap<Object, Boolean>(); Map<Object, Boolean> newHistory = new IdentityHashMap<>();
for (LifecycleListener listener : _listeners) { for (LifecycleListener listener : _listeners) {
Boolean previousEvent = history.get(listener); Boolean previousEvent = history.get(listener);
// we propagate the event only if it was not already sent // we propagate the event only if it was not already sent
@ -396,6 +373,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
} }
private class ExpiredSessionRecovery extends Thread { private class ExpiredSessionRecovery extends Thread {
private ExpiredSessionRecovery() { private ExpiredSessionRecovery() {
super("ZooKeeper expired session recovery thread"); super("ZooKeeper expired session recovery thread");
} }
@ -403,19 +381,19 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
@Override @Override
public void run() { public void run() {
LOG.info("Entering recovery mode"); LOG.info("Entering recovery mode");
synchronized(_lock) { synchronized (_lock) {
try { try {
int count = 0; int count = 0;
while (_state == ZKClient.State.NONE) { while (_state == ZKClient.State.NONE) {
try { try {
count++; count++;
LOG.warn("Recovery mode: trying to reconnect to zookeeper [" + count + "]"); LOG.warn("Recovery mode: trying to reconnect to zookeeper [{}]", count);
ZKClient.this.connect(); ZKClient.this.connect();
} catch (Throwable e) { } catch (Throwable e) {
LOG.warn("Recovery mode: reconnect attempt failed [" + count + "]... waiting for " + _reconnectTimeout, e); LOG.warn("Recovery mode: reconnect attempt failed [{}]... waiting for {}", count, _reconnectTimeout, e);
try { try {
_lock.wait(_reconnectTimeout.getDurationInMilliseconds()); _lock.wait(_reconnectTimeout.getDurationInMilliseconds());
} catch(InterruptedException e1) { } catch (InterruptedException e1) {
throw new RuntimeException("Recovery mode: wait interrupted... bailing out", e1); throw new RuntimeException("Recovery mode: wait interrupted... bailing out", e1);
} }
} }
@ -426,32 +404,21 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
} }
} }
} }
} }
/** public ZKClient(String connectString, Timespan sessionTimeout, Watcher watcher) {
* Constructor
*/
public ZKClient(String connectString, Timespan sessionTimeout, Watcher watcher)
{
this(new ZooKeeperFactory(connectString, sessionTimeout, watcher)); this(new ZooKeeperFactory(connectString, sessionTimeout, watcher));
} }
/** public ZKClient(IZooKeeperFactory factory) {
* Constructor
*/
public ZKClient(IZooKeeperFactory factory)
{
this(factory, null); this(factory, null);
} }
/** public ZKClient(IZooKeeperFactory factory, String chroot) {
* Constructor
*/
public ZKClient(IZooKeeperFactory factory, String chroot)
{
super(chroot); super(chroot);
_factory = factory; _factory = factory;
Map<String, String> acls = new HashMap<String, String>(); Map<String, String> acls = new HashMap<>();
acls.put("/", "world:anyone:acdrw"); acls.put("/", "world:anyone:acdrw");
setACLs(acls); setACLs(acls);
} }
@ -476,28 +443,25 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
perm |= ZooDefs.Perms.ADMIN; perm |= ZooDefs.Perms.ADMIN;
break; break;
default: default:
System.err.println("Unknown perm type: " + permString.charAt(i)); System.err.println("Unknown perm type:" + permString.charAt(i));
} }
} }
return perm; return perm;
} }
private static List<ACL> parseACLs(String aclString) { private static List<ACL> parseACLs(String aclString) {
List<ACL> acl; List<ACL> acl;
String acls[] = aclString.split(","); String acls[] = aclString.split(",");
acl = new ArrayList<ACL>(); acl = new ArrayList<>();
for (String a : acls) { for (String a : acls) {
int firstColon = a.indexOf(':'); int firstColon = a.indexOf(':');
int lastColon = a.lastIndexOf(':'); int lastColon = a.lastIndexOf(':');
if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) { if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) {
System.err.println(a + " does not have the form scheme:id:perm"); System.err.println(a + " does not have the form scheme:id:perm");
continue; continue;
} }
ACL newAcl = new ACL(); ACL newAcl = new ACL();
newAcl.setId(new Id(a.substring(0, firstColon), a.substring(firstColon + 1, lastColon))); newAcl.setId(new Id(a.substring(0, firstColon), a.substring(firstColon + 1, lastColon)));
newAcl.setPerms(getPermFromString(a.substring(lastColon + 1))); newAcl.setPerms(getPermFromString(a.substring(lastColon + 1)));
acl.add(newAcl); acl.add(newAcl);
} }
@ -511,7 +475,7 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
try { try {
createBytesNodeWithParents(path, data, acl, createMode); createBytesNodeWithParents(path, data, acl, createMode);
return null; return null;
} catch(KeeperException.NodeExistsException e) { } catch (KeeperException.NodeExistsException e) {
// this should not happen very often (race condition) // this should not happen very often (race condition)
return setByteData(path, data); return setByteData(path, data);
} }
@ -595,20 +559,20 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
private void createParents(String path) throws InterruptedException, KeeperException { private void createParents(String path) throws InterruptedException, KeeperException {
path = PathUtils.getParentPath(adjustPath(path)); path = PathUtils.getParentPath(adjustPath(path));
path = PathUtils.removeTrailingSlash(path); path = PathUtils.removeTrailingSlash(path);
List<String> paths = new ArrayList<String>(); List<String> paths = new ArrayList<>();
while(!path.equals("") && getZk().exists(path, false) == null) { while (!path.equals("") && getZk().exists(path, false) == null) {
paths.add(path); paths.add(path);
path = PathUtils.getParentPath(path); path = PathUtils.getParentPath(path);
path = PathUtils.removeTrailingSlash(path); path = PathUtils.removeTrailingSlash(path);
} }
Collections.reverse(paths); Collections.reverse(paths);
for(String p : paths) { for (String p : paths) {
try { try {
getZk().create(p, getZk().create(p,
null, null,
getNodeACLs(p), getNodeACLs(p),
CreateMode.PERSISTENT); CreateMode.PERSISTENT);
} catch(KeeperException.NodeExistsException e) { } catch (KeeperException.NodeExistsException e) {
// ok we continue... // ok we continue...
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("parent already exists " + p); LOG.debug("parent already exists " + p);
@ -623,9 +587,10 @@ public class ZKClient extends org.linkedin.zookeeper.client.AbstractZKClient imp
} else { } else {
try { try {
return data.getBytes(CHARSET); return data.getBytes(CHARSET);
} catch(UnsupportedEncodingException e) { } catch (UnsupportedEncodingException e) {
throw new RuntimeException(e); throw new RuntimeException(e);
} }
} }
} }
} }

View File

@ -17,7 +17,6 @@
package org.apache.activemq.partition; package org.apache.activemq.partition;
import org.apache.activemq.broker.Broker; import org.apache.activemq.broker.Broker;
import org.apache.activemq.leveldb.replicated.groups.ZKClient;
import org.apache.activemq.partition.dto.Partitioning; import org.apache.activemq.partition.dto.Partitioning;
import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher; import org.apache.zookeeper.Watcher;

View File

@ -18,7 +18,6 @@ package org.apache.activemq.partition;
import org.apache.activemq.broker.BrokerPlugin; import org.apache.activemq.broker.BrokerPlugin;
import org.apache.activemq.broker.BrokerService; import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.replicated.groups.ZKClient;
import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.server.NIOServerCnxnFactory; import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer; import org.apache.zookeeper.server.ZooKeeperServer;
@ -49,7 +48,6 @@ public class ZooKeeperPartitionBrokerTest extends PartitionBrokerTest {
super.setUp(); super.setUp();
} }
@After @After
public void tearDown() throws Exception { public void tearDown() throws Exception {
super.tearDown(); super.tearDown();

View File

@ -83,6 +83,12 @@
<type>test-jar</type> <type>test-jar</type>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging-version}</version>
<scope>test</scope>
</dependency>
<!-- <dependency> <!-- <dependency>
<groupId>${project.groupId}</groupId> <groupId>${project.groupId}</groupId>
<artifactId>activemq-jaas</artifactId> <artifactId>activemq-jaas</artifactId>

View File

@ -74,11 +74,6 @@
<artifactId>activeio-core</artifactId> <artifactId>activeio-core</artifactId>
<optional>true</optional> <optional>true</optional>
</dependency> </dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-leveldb-store</artifactId>
<optional>true</optional>
</dependency>
<!-- add the optional replication deps --> <!-- add the optional replication deps -->
<dependency> <dependency>
@ -219,7 +214,6 @@
<include>${basedir}/../activemq-client/src/main/java</include> <include>${basedir}/../activemq-client/src/main/java</include>
<include>${basedir}/../activemq-broker/src/main/java</include> <include>${basedir}/../activemq-broker/src/main/java</include>
<include>${basedir}/../activemq-camel/src/main/java</include> <include>${basedir}/../activemq-camel/src/main/java</include>
<include>${basedir}/../activemq-leveldb-store/src/main/java</include>
<include>${basedir}/../activemq-jdbc-store/src/main/java</include> <include>${basedir}/../activemq-jdbc-store/src/main/java</include>
<include>${basedir}/../activemq-kahadb-store/src/main/java</include> <include>${basedir}/../activemq-kahadb-store/src/main/java</include>
<include>${basedir}/../activemq-mqtt/src/main/java</include> <include>${basedir}/../activemq-mqtt/src/main/java</include>
@ -281,29 +275,29 @@
</plugins> </plugins>
</build> </build>
<profiles> <profiles>
<profile> <profile>
<id>activemq.tests-sanity</id> <id>activemq.tests-sanity</id>
<activation> <activation>
<property> <property>
<name>activemq.tests</name> <name>activemq.tests</name>
<value>smoke</value> <value>smoke</value>
</property> </property>
</activation> </activation>
<build> <build>
<plugins> <plugins>
<plugin> <plugin>
<artifactId>maven-surefire-plugin</artifactId> <artifactId>maven-surefire-plugin</artifactId>
<configuration> <configuration>
<includes> <includes>
<include>**/SpringBrokerFactoryTest.*</include> <include>**/SpringBrokerFactoryTest.*</include>
</includes> </includes>
</configuration> </configuration>
</plugin> </plugin>
</plugins> </plugins>
</build> </build>
</profile> </profile>
<profile> <profile>
<id>activemq.tests-autoTransport</id> <id>activemq.tests-autoTransport</id>
<activation> <activation>
<property> <property>
@ -324,110 +318,6 @@
</plugins> </plugins>
</build> </build>
</profile> </profile>
</profiles>
<profile>
<id>activemq.tests.windows.excludes</id>
<activation>
<os>
<family>Windows</family>
</os>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/LevelDBConfigTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.solaris.excludes</id>
<activation>
<property>
<name>os.name</name>
<value>SunOS</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/LevelDBConfigTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.aix.excludes</id>
<activation>
<property>
<name>os.name</name>
<value>AIX</value>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/LevelDBConfigTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.mac.excludes</id>
<activation>
<os>
<family>mac</family>
</os>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/LevelDBConfigTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>activemq.tests.hpux.excludes</id>
<activation>
<os>
<family>HP-UX</family>
</os>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<excludes combine.children="append">
<exclude>**/LevelDBConfigTest.*</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project> </project>

View File

@ -1,122 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.leveldb;
import java.io.File;
import junit.framework.TestCase;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.usage.SystemUsage;
import org.apache.activemq.xbean.BrokerFactoryBean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;
/**
*
*/
public class LevelDBConfigTest extends TestCase {
protected static final String CONF_ROOT = "src/test/resources/org/apache/activemq/store/leveldb/";
private static final Logger LOG = LoggerFactory.getLogger(LevelDBConfigTest.class);
/*
* This tests configuring the different broker properties using
* xbeans-spring
*/
public void testBrokerConfig() throws Exception {
BrokerService broker;
// Create broker from resource
// System.out.print("Creating broker... ");
broker = createBroker("org/apache/activemq/store/leveldb/leveldb.xml");
LOG.info("Success");
try {
// Check broker configuration
// System.out.print("Checking broker configurations... ");
assertEquals("Broker Config Error (brokerName)", "brokerConfigTest", broker.getBrokerName());
assertEquals("Broker Config Error (populateJMSXUserID)", false, broker.isPopulateJMSXUserID());
assertEquals("Broker Config Error (useLoggingForShutdownErrors)", true, broker.isUseLoggingForShutdownErrors());
assertEquals("Broker Config Error (useJmx)", true, broker.isUseJmx());
assertEquals("Broker Config Error (persistent)", true, broker.isPersistent());
assertEquals("Broker Config Error (useShutdownHook)", false, broker.isUseShutdownHook());
assertEquals("Broker Config Error (deleteAllMessagesOnStartup)", true, broker.isDeleteAllMessagesOnStartup());
LOG.info("Success");
// Check specific vm transport
// System.out.print("Checking vm connector... ");
assertEquals("Should have a specific VM Connector", "vm://javacoola", broker.getVmConnectorURI().toString());
LOG.info("Success");
// Check usage manager
// System.out.print("Checking memory manager configurations... ");
SystemUsage systemUsage = broker.getSystemUsage();
assertTrue("Should have a SystemUsage", systemUsage != null);
assertEquals("SystemUsage Config Error (MemoryUsage.limit)", 1024 * 1024 * 10, systemUsage.getMemoryUsage().getLimit());
assertEquals("SystemUsage Config Error (MemoryUsage.percentUsageMinDelta)", 20, systemUsage.getMemoryUsage().getPercentUsageMinDelta());
assertEquals("SystemUsage Config Error (TempUsage.limit)", 1024 * 1024 * 100, systemUsage.getTempUsage().getLimit());
assertEquals("SystemUsage Config Error (StoreUsage.limit)", 1024 * 1024 * 1024, systemUsage.getStoreUsage().getLimit());
assertEquals("SystemUsage Config Error (StoreUsage.name)", "foo", systemUsage.getStoreUsage().getName());
assertNotNull(systemUsage.getStoreUsage().getStore());
assertTrue(systemUsage.getStoreUsage().getStore() instanceof LevelDBPersistenceAdapter);
LOG.info("Success");
} finally {
if (broker != null) {
broker.stop();
}
}
}
/*
* TODO: Create additional tests for forwarding bridges
*/
protected static void recursiveDelete(File file) {
if (file.isDirectory()) {
File[] files = file.listFiles();
for (int i = 0; i < files.length; i++) {
recursiveDelete(files[i]);
}
}
file.delete();
}
protected BrokerService createBroker(String resource) throws Exception {
return createBroker(new ClassPathResource(resource));
}
protected BrokerService createBroker(Resource resource) throws Exception {
BrokerFactoryBean factory = new BrokerFactoryBean(resource);
factory.afterPropertiesSet();
BrokerService broker = factory.getBroker();
assertTrue("Should have a broker!", broker != null);
// Broker is already started by default when using the XML file
// broker.start();
return broker;
}
}

View File

@ -1,55 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:amq="http://activemq.apache.org/schema/core"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- normal ActiveMQ XML config which is less verbose & can be validated -->
<amq:broker brokerName="brokerConfigTest" populateJMSXUserID="false"
useLoggingForShutdownErrors="true" useJmx="true"
persistent="true" vmConnectorURI="vm://javacoola"
useShutdownHook="false" deleteAllMessagesOnStartup="true">
<amq:persistenceAdapter>
<amq:levelDB directory = "target/activemq-data"/>
</amq:persistenceAdapter>
<amq:systemUsage>
<amq:systemUsage>
<amq:memoryUsage>
<amq:memoryUsage limit="10 mb" percentUsageMinDelta="20"/>
</amq:memoryUsage>
<amq:storeUsage>
<amq:storeUsage limit="1 gb" name="foo"/>
</amq:storeUsage>
<amq:tempUsage>
<amq:tempUsage limit="100 mb"/>
</amq:tempUsage>
</amq:systemUsage>
</amq:systemUsage>
<amq:transportConnectors>
<amq:transportConnector uri="tcp://localhost:61635"/>
</amq:transportConnectors>
</amq:broker>
</beans>
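With LevelDB removed, a broker configured with the <amq:levelDB> adapter above would move to KahaDB; a minimal sketch of the equivalent element, reusing the same data directory from the removed config, might look like:
<amq:persistenceAdapter>
<amq:kahaDB directory="target/activemq-data"/>
</amq:persistenceAdapter>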

View File

@ -46,10 +46,6 @@
<groupId>org.apache.activemq</groupId> <groupId>org.apache.activemq</groupId>
<artifactId>activemq-kahadb-store</artifactId> <artifactId>activemq-kahadb-store</artifactId>
</dependency> </dependency>
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-leveldb-store</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.apache.activemq</groupId> <groupId>org.apache.activemq</groupId>
<artifactId>activemq-console</artifactId> <artifactId>activemq-console</artifactId>
@ -669,11 +665,8 @@
<!-- breaks hudson: disable till we get a chance to give it the time that it needs - http://hudson.zones.apache.org/hudson/job/ActiveMQ/org.apache.activemq$activemq-core/199/testReport/org.apache.activemq.network/BrokerNetworkWithStuckMessagesTest/testBrokerNetworkWithStuckMessages/ --> <!-- breaks hudson: disable till we get a chance to give it the time that it needs - http://hudson.zones.apache.org/hudson/job/ActiveMQ/org.apache.activemq$activemq-core/199/testReport/org.apache.activemq.network/BrokerNetworkWithStuckMessagesTest/testBrokerNetworkWithStuckMessages/ -->
<exclude>**/BrokerNetworkWithStuckMessagesTest.*</exclude> <exclude>**/BrokerNetworkWithStuckMessagesTest.*</exclude>
<exclude>**/DoSTest.*</exclude> <exclude>**/DoSTest.*</exclude>
<!-- journal over journal is odd, but there may be some audit missing in leveldb, to investigate -->
<exclude>**/StoreQueueCursorJournalNoDuplicateTest.*</exclude> <exclude>**/StoreQueueCursorJournalNoDuplicateTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude> <exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude>
<exclude>**/LevelDBXARecoveryBrokerTest.*</exclude>
</excludes> </excludes>
</configuration> </configuration>
</plugin> </plugin>
@ -1128,21 +1121,6 @@
<configuration> <configuration>
<excludes combine.children="append"> <excludes combine.children="append">
<exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude> <exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude>
<exclude>**/mLevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/StoreQueueCursorLevelDBNoDuplicateTest.*</exclude>
<exclude>**/AMQ2149LevelDBTest.*</exclude>
<exclude>**/SparseAckReplayAfterStoreCleanupLevelDBStoreTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/LevelDBDurableTopicTest.*</exclude>
<exclude>**/LevelDBStoreQueueTest.*</exclude>
<exclude>**/LevelDBNegativeQueueTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBStorePerDestinationTest.*</exclude>
<exclude>**/LevelDBDurableSubscriptionTest.*</exclude>
<exclude>**/QueueBrowsingLevelDBTest.*</exclude>
<exclude>**/SingleBrokerVirtualDestinationsWithWildcardLevelDBTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkLevelDBTest.*</exclude>
</excludes> </excludes>
</configuration> </configuration>
</plugin> </plugin>
@ -1166,27 +1144,12 @@
<exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude> <exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude>
<exclude>**/AMQ1866.*</exclude> <exclude>**/AMQ1866.*</exclude>
<exclude>**/AMQ2149Test.*</exclude> <exclude>**/AMQ2149Test.*</exclude>
<exclude>**/AMQ2149LevelDBTest.*</exclude>
<exclude>**/AMQ2584Test.*</exclude> <exclude>**/AMQ2584Test.*</exclude>
<exclude>**/ExpiredMessagesTest.*</exclude> <exclude>**/ExpiredMessagesTest.*</exclude>
<exclude>**/LevelDBDurableSubscriptionTest.*</exclude>
<exclude>**/LevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/ManagedDurableSubscriptionTest.*</exclude> <exclude>**/ManagedDurableSubscriptionTest.*</exclude>
<exclude>**/SparseAckReplayAfterStoreCleanupLevelDBStoreTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude> <exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude>
<exclude>**/ConcurrentProducerDurableConsumerTest.*</exclude> <!-- AIX only --> <exclude>**/ConcurrentProducerDurableConsumerTest.*</exclude> <!-- AIX only -->
<exclude>**/ConcurrentProducerQueueConsumerTest.*</exclude> <!-- AIX only --> <exclude>**/ConcurrentProducerQueueConsumerTest.*</exclude> <!-- AIX only -->
<exclude>**/mLevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/StoreQueueCursorLevelDBNoDuplicateTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBDurableTopicTest.*</exclude>
<exclude>**/LevelDBStoreQueueTest.*</exclude>
<exclude>**/LevelDBNegativeQueueTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBStorePerDestinationTest.*</exclude>
<exclude>**/QueueBrowsingLevelDBTest.*</exclude>
<exclude>**/SingleBrokerVirtualDestinationsWithWildcardLevelDBTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkLevelDBTest.*</exclude>
</excludes> </excludes>
</configuration> </configuration>
</plugin> </plugin>
@ -1209,25 +1172,10 @@
<excludes combine.children="append"> <excludes combine.children="append">
<exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude> <exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude>
<exclude>**/AMQ2149Test.*</exclude> <exclude>**/AMQ2149Test.*</exclude>
<exclude>**/AMQ2149LevelDBTest.*</exclude>
<exclude>**/AMQ2584Test.*</exclude> <exclude>**/AMQ2584Test.*</exclude>
<exclude>**/ExpiredMessagesTest.*</exclude> <exclude>**/ExpiredMessagesTest.*</exclude>
<exclude>**/LevelDBDurableSubscriptionTest.*</exclude>
<exclude>**/LevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/ManagedDurableSubscriptionTest.*</exclude> <exclude>**/ManagedDurableSubscriptionTest.*</exclude>
<exclude>**/SparseAckReplayAfterStoreCleanupLevelDBStoreTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude> <exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude>
<exclude>**/mLevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/StoreQueueCursorLevelDBNoDuplicateTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBDurableTopicTest.*</exclude>
<exclude>**/LevelDBStoreQueueTest.*</exclude>
<exclude>**/LevelDBNegativeQueueTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBStorePerDestinationTest.*</exclude>
<exclude>**/QueueBrowsingLevelDBTest.*</exclude>
<exclude>**/SingleBrokerVirtualDestinationsWithWildcardLevelDBTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkLevelDBTest.*</exclude>
<!-- These are performance tests and take too long to run --> <!-- These are performance tests and take too long to run -->
<exclude>**/perf/*</exclude> <exclude>**/perf/*</exclude>
<!-- These are load tests and take too long to run --> <!-- These are load tests and take too long to run -->
@ -1253,25 +1201,10 @@
<excludes combine.children="append"> <excludes combine.children="append">
<exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude> <exclude>**/QueueMasterSlaveSingleUrlTest.*</exclude>
<exclude>**/AMQ2149Test.*</exclude> <exclude>**/AMQ2149Test.*</exclude>
<exclude>**/AMQ2149LevelDBTest.*</exclude>
<exclude>**/AMQ2584Test.*</exclude> <exclude>**/AMQ2584Test.*</exclude>
<exclude>**/ExpiredMessagesTest.*</exclude> <exclude>**/ExpiredMessagesTest.*</exclude>
<exclude>**/LevelDBDurableSubscriptionTest.*</exclude>
<exclude>**/LevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/ManagedDurableSubscriptionTest.*</exclude> <exclude>**/ManagedDurableSubscriptionTest.*</exclude>
<exclude>**/SparseAckReplayAfterStoreCleanupLevelDBStoreTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude> <exclude>**/ThreeBrokerVirtualTopicNetworkAMQPATest.*</exclude>
<exclude>**/mLevelDBXARecoveryBrokerTest.*</exclude>
<exclude>**/StoreQueueCursorLevelDBNoDuplicateTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBDurableTopicTest.*</exclude>
<exclude>**/LevelDBStoreQueueTest.*</exclude>
<exclude>**/LevelDBNegativeQueueTest.*</exclude>
<exclude>**/LevelDBStoreBrokerTest.*</exclude>
<exclude>**/LevelDBStorePerDestinationTest.*</exclude>
<exclude>**/QueueBrowsingLevelDBTest.*</exclude>
<exclude>**/SingleBrokerVirtualDestinationsWithWildcardLevelDBTest.*</exclude>
<exclude>**/ThreeBrokerVirtualTopicNetworkLevelDBTest.*</exclude>
<!-- These are performance tests and take too long to run -->
<exclude>**/perf/*</exclude>
<!-- These are load tests and take too long to run -->

View File

@@ -42,7 +42,6 @@ import org.apache.activemq.command.ActiveMQTopic;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.jdbc.JDBCPersistenceAdapter;
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
import org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter;
import org.apache.activemq.store.memory.MemoryPersistenceAdapter;
import org.apache.activemq.util.JMXSupport;
@@ -197,7 +196,7 @@ public abstract class TestSupport extends CombinationTestSupport {
return proxy;
}
public static enum PersistenceAdapterChoice {LevelDB, KahaDB, JDBC, MEM };
public static enum PersistenceAdapterChoice {KahaDB, JDBC, MEM };
public PersistenceAdapter setDefaultPersistenceAdapter(BrokerService broker) throws IOException {
return setPersistenceAdapter(broker, defaultPersistenceAdapter);
@@ -214,9 +213,6 @@ public abstract class TestSupport extends CombinationTestSupport {
case KahaDB:
adapter = new KahaDBPersistenceAdapter();
break;
case LevelDB:
adapter = new LevelDBPersistenceAdapter();
break;
case MEM:
adapter = new MemoryPersistenceAdapter();
break;

View File

@@ -46,11 +46,9 @@ public class QueueMbeanRestartTest extends TestSupport {
@Parameterized.Parameters
public static Collection<TestSupport.PersistenceAdapterChoice[]> getTestParameters() {
TestSupport.PersistenceAdapterChoice[] kahaDb = {TestSupport.PersistenceAdapterChoice.KahaDB};
TestSupport.PersistenceAdapterChoice[] levelDb = {TestSupport.PersistenceAdapterChoice.LevelDB};
TestSupport.PersistenceAdapterChoice[] jdbc = {TestSupport.PersistenceAdapterChoice.JDBC};
List<TestSupport.PersistenceAdapterChoice[]> choices = new ArrayList<TestSupport.PersistenceAdapterChoice[]>();
choices.add(kahaDb);
choices.add(levelDb);
choices.add(jdbc);
return choices;

View File

@@ -17,7 +17,6 @@
package org.apache.activemq.broker;
import com.google.common.collect.ImmutableList;
import org.apache.activemq.broker.region.Destination;
import org.apache.activemq.broker.region.RegionBroker;
import org.apache.activemq.command.ActiveMQDestination;
@@ -30,7 +29,6 @@ import org.apache.activemq.command.ProducerInfo;
import org.apache.activemq.command.SessionInfo;
import org.apache.activemq.store.MessageStoreStatistics;
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
import org.apache.activemq.util.IOHelper;
import org.junit.After;
import org.junit.Before;
@@ -39,12 +37,7 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.io.File;
import java.util.Arrays;
import java.util.*;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;
@RunWith(value = Parameterized.class)
@@ -131,7 +124,9 @@ public class RecoveryStatsBrokerTest extends BrokerRestartTestSupport {
@Test(timeout = 60 * 1000)
public void testStaticsRecovery() throws Exception {
List<ActiveMQDestination> destinations = ImmutableList.of(new ActiveMQQueue("TEST.A"), new ActiveMQQueue("TEST.B"));
List<ActiveMQDestination> destinations = new ArrayList<>();
destinations.add(new ActiveMQQueue("TEST.A"));
destinations.add(new ActiveMQQueue("TEST.B"));
Random random = new Random();
Map<ActiveMQDestination, Integer> consumedMessages = new HashMap<>();

View File

@@ -52,7 +52,7 @@ public class RedeliveryRestartTest extends TestSupport {
@Parameterized.Parameters(name="Store={0}")
public static Iterable<Object[]> data() {
return Arrays.asList(new Object[][]{{TestSupport.PersistenceAdapterChoice.KahaDB},{TestSupport.PersistenceAdapterChoice.JDBC},{TestSupport.PersistenceAdapterChoice.LevelDB}});
return Arrays.asList(new Object[][]{{TestSupport.PersistenceAdapterChoice.KahaDB},{TestSupport.PersistenceAdapterChoice.JDBC}});
}
@Override

View File

@@ -1,118 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.broker.ft;
import java.io.File;
import java.net.URI;
import java.util.concurrent.TimeUnit;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.TransportConnector;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.util.Wait;
import org.junit.Ignore;
public class QueueMasterSlaveSingleUrlTest extends QueueMasterSlaveTestSupport {
private final String brokerUrl = "tcp://localhost:62001";
private final String singleUriString = "failover://(" + brokerUrl +")?randomize=false&useExponentialBackOff=false";
@Override
protected void setUp() throws Exception {
setAutoFail(true);
super.setUp();
}
@Override
protected ActiveMQConnectionFactory createConnectionFactory() throws Exception {
return new ActiveMQConnectionFactory(singleUriString);
}
@Override
protected void createMaster() throws Exception {
master = new BrokerService();
master.setBrokerName("shared-master");
configureSharedPersistenceAdapter(master);
master.addConnector(brokerUrl);
master.start();
}
private void configureSharedPersistenceAdapter(BrokerService broker) throws Exception {
LevelDBStore adapter = new LevelDBStore();
adapter.setDirectory(new File("shared"));
broker.setPersistenceAdapter(adapter);
}
@Override
protected void createSlave() throws Exception {
new Thread(new Runnable() {
@Override
public void run() {
try {
BrokerService broker = new BrokerService();
broker.setBrokerName("shared-slave");
configureSharedPersistenceAdapter(broker);
// add transport as a service so that it is bound on start, after store started
final TransportConnector tConnector = new TransportConnector();
tConnector.setUri(new URI(brokerUrl));
broker.addConnector(tConnector);
broker.start();
slave.set(broker);
slaveStarted.countDown();
} catch (Exception e) {
e.printStackTrace();
}
}
}).start();
}
public void testNetworkMasterSlave() throws Exception {
final BrokerService client = new BrokerService();
client.setBrokerName("client");
client.setPersistent(false);
client.getManagementContext().setCreateConnector(false);
client.addNetworkConnector("masterslave:(tcp://localhost:62001,tcp://localhost:62002)");
client.start();
try {
Wait.waitFor(new Wait.Condition() {
@Override
public boolean isSatisified() throws Exception {
return client.getRegionBroker().getPeerBrokerInfos().length == 1;
}
});
assertTrue(!master.isSlave());
master.stop();
assertTrue("slave started", slaveStarted.await(60, TimeUnit.SECONDS));
assertTrue(!slave.get().isSlave());
Wait.waitFor(new Wait.Condition() {
@Override
public boolean isSatisified() throws Exception {
return client.getRegionBroker().getPeerBrokerInfos().length == 1;
}
});
} finally {
client.stop();
}
}
}
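The shared-directory failover pattern exercised by this deleted test is not LevelDB-specific. A minimal sketch of the same helper against KahaDB, assuming the rest of the master/slave wiring stays as in the removed test (the "shared" directory name is kept from the deleted code):

    import java.io.File;
    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;

    // Master and slave both call this with the same directory; the second broker
    // blocks on the store's file lock until the first one releases it.
    private void configureSharedPersistenceAdapter(BrokerService broker) throws Exception {
        KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
        adapter.setDirectory(new File("shared"));
        broker.setPersistenceAdapter(adapter);
    }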

View File

@@ -1,79 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.broker;
import junit.framework.Test;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter;
import org.apache.activemq.store.leveldb.LevelDBPersistenceAdapter;
import java.util.LinkedList;
import java.util.List;
public class mLevelDBXARecoveryBrokerTest extends XARecoveryBrokerTest {
@Override
protected void configureBroker(BrokerService broker) throws Exception {
super.configureBroker(broker);
MultiKahaDBPersistenceAdapter mKahaDB = new MultiKahaDBPersistenceAdapter();
List adapters = new LinkedList<FilteredKahaDBPersistenceAdapter>();
FilteredKahaDBPersistenceAdapter defaultEntry = new FilteredKahaDBPersistenceAdapter();
defaultEntry.setPersistenceAdapter(new LevelDBPersistenceAdapter());
adapters.add(defaultEntry);
FilteredKahaDBPersistenceAdapter special = new FilteredKahaDBPersistenceAdapter();
special.setDestination(new ActiveMQQueue("special"));
special.setPersistenceAdapter(new LevelDBPersistenceAdapter());
adapters.add(special);
mKahaDB.setFilteredPersistenceAdapters(adapters);
broker.setPersistenceAdapter(mKahaDB);
}
public static Test suite() {
return suite(mLevelDBXARecoveryBrokerTest.class);
}
public static void main(String[] args) {
junit.textui.TestRunner.run(suite());
}
protected ActiveMQDestination createDestination() {
return new ActiveMQQueue("test,special");
}
public void testQueuePersistentPreparedAcksAvailableAfterRestartAndRollback() throws Exception {
// super.testQueuePersistentPreparedAcksAvailableAfterRestartAndRollback();
}
public void testQueuePersistentUncommittedAcksLostOnRestart() throws Exception {
// super.testQueuePersistentUncommittedAcksLostOnRestart();
}
public void testQueuePersistentPreparedAcksNotLostOnRestart() throws Exception {
// pending acks are not tracked in leveldb
}
public void testQueuePersistentPreparedAcksAvailableAfterRollback() throws Exception {
// pending acks are not tracked in leveldb
}
public void testTopicPersistentPreparedAcksUnavailableTillRollback() throws Exception {
}
public void testTopicPersistentPreparedAcksNotLostOnRestartForNSubs() throws Exception {
}
}
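The per-destination partitioning this test configured survives the removal, since MultiKahaDBPersistenceAdapter accepts ordinary KahaDB adapters in its filtered entries. A sketch of the equivalent LevelDB-free configuration, assuming the same broker wiring as the deleted configureBroker:

    import java.util.LinkedList;
    import java.util.List;
    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.command.ActiveMQQueue;
    import org.apache.activemq.store.kahadb.FilteredKahaDBPersistenceAdapter;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
    import org.apache.activemq.store.kahadb.MultiKahaDBPersistenceAdapter;

    protected void configureBroker(BrokerService broker) throws Exception {
        MultiKahaDBPersistenceAdapter mKahaDB = new MultiKahaDBPersistenceAdapter();
        List<FilteredKahaDBPersistenceAdapter> adapters = new LinkedList<>();

        // Default store for destinations that match no other filter.
        FilteredKahaDBPersistenceAdapter defaultEntry = new FilteredKahaDBPersistenceAdapter();
        defaultEntry.setPersistenceAdapter(new KahaDBPersistenceAdapter());
        adapters.add(defaultEntry);

        // Dedicated store for the "special" queue.
        FilteredKahaDBPersistenceAdapter special = new FilteredKahaDBPersistenceAdapter();
        special.setDestination(new ActiveMQQueue("special"));
        special.setPersistenceAdapter(new KahaDBPersistenceAdapter());
        adapters.add(special);

        mKahaDB.setFilteredPersistenceAdapters(adapters);
        broker.setPersistenceAdapter(mKahaDB);
    }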

View File

@@ -19,6 +19,7 @@ package org.apache.activemq.broker.policy;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.ArrayList;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
@@ -39,7 +40,6 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Lists;
/**
* This unit test is to test that setting the property "maxDestinations" on
@@ -242,7 +242,9 @@ public class MaxDestinationsPolicyTest {
PolicyEntry entry = new PolicyEntry();
entry.setDestination(destination);
entry.setMaxDestinations(maxDestinations);
policyMap.setPolicyEntries(Lists.newArrayList(entry));
ArrayList<PolicyEntry> policyEntries = new ArrayList<>();
policyEntries.add(entry);
policyMap.setPolicyEntries(policyEntries);
broker.setDestinationPolicy(policyMap);
return policyMap;
}

View File

@@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.broker.region.cursors;
import org.apache.activeio.journal.active.JournalImpl;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.store.journal.JournalPersistenceAdapter;
import java.io.File;
/**
* @author gtully
* @see https://issues.apache.org/activemq/browse/AMQ-2020
**/
public class StoreQueueCursorLevelDBNoDuplicateTest extends StoreQueueCursorNoDuplicateTest {
@Override
protected BrokerService createBroker() throws Exception {
BrokerService broker = super.createBroker();
LevelDBStore store = new LevelDBStore();
store.setDirectory(new File("target/activemq-data/leveldb"));
broker.setPersistenceAdapter(store);
return broker;
}
}
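The duplicate-suppression scenario itself remains covered by the parent StoreQueueCursorNoDuplicateTest, which runs against the broker's default (KahaDB) store. If a store-specific variant were still wanted, the deleted override maps directly onto KahaDB; a sketch with an illustrative directory:

    import java.io.File;
    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;

    @Override
    protected BrokerService createBroker() throws Exception {
        BrokerService broker = super.createBroker();
        KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
        store.setDirectory(new File("target/activemq-data/kahadb")); // illustrative path
        broker.setPersistenceAdapter(store);
        return broker;
    }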

View File

@@ -1,224 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.bugs;
import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import javax.jms.Connection;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Session;
import javax.jms.TextMessage;
import junit.framework.TestCase;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.region.policy.PolicyEntry;
import org.apache.activemq.broker.region.policy.PolicyMap;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.leveldb.LevelDBStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is a test case for the issue reported at:
* https://issues.apache.org/activemq/browse/AMQ-1866
*
* If you have a JMS producer sending messages to multiple fast consumers and
* one slow consumer, eventually all consumers will run as slow as
* the slowest consumer.
*/
public class AMQ1866 extends TestCase {
private static final Logger log = LoggerFactory.getLogger(ConsumerThread.class);
private BrokerService brokerService;
private ArrayList<Thread> threads = new ArrayList<Thread>();
private final String ACTIVEMQ_BROKER_BIND = "tcp://localhost:0";
private String ACTIVEMQ_BROKER_URI;
AtomicBoolean shutdown = new AtomicBoolean();
private ActiveMQQueue destination;
@Override
protected void setUp() throws Exception {
// Start an embedded broker up.
brokerService = new BrokerService();
LevelDBStore adaptor = new LevelDBStore();
brokerService.setPersistenceAdapter(adaptor);
brokerService.deleteAllMessages();
// A small max page size makes this issue occur faster.
PolicyMap policyMap = new PolicyMap();
PolicyEntry pe = new PolicyEntry();
pe.setMaxPageSize(1);
policyMap.put(new ActiveMQQueue(">"), pe);
brokerService.setDestinationPolicy(policyMap);
brokerService.addConnector(ACTIVEMQ_BROKER_BIND);
brokerService.start();
ACTIVEMQ_BROKER_URI = brokerService.getTransportConnectors().get(0).getPublishableConnectString();
destination = new ActiveMQQueue(getName());
}
@Override
protected void tearDown() throws Exception {
// Stop any running threads.
shutdown.set(true);
for (Thread t : threads) {
t.interrupt();
t.join();
}
brokerService.stop();
}
public void testConsumerSlowDownPrefetch0() throws Exception {
ACTIVEMQ_BROKER_URI = ACTIVEMQ_BROKER_URI + "?jms.prefetchPolicy.queuePrefetch=0";
doTestConsumerSlowDown();
}
public void testConsumerSlowDownPrefetch10() throws Exception {
ACTIVEMQ_BROKER_URI = ACTIVEMQ_BROKER_URI + "?jms.prefetchPolicy.queuePrefetch=10";
doTestConsumerSlowDown();
}
public void testConsumerSlowDownDefaultPrefetch() throws Exception {
doTestConsumerSlowDown();
}
public void doTestConsumerSlowDown() throws Exception {
// Preload the queue.
produce(20000);
Thread producer = new Thread() {
@Override
public void run() {
try {
while(!shutdown.get()) {
produce(1000);
}
} catch (Exception e) {
}
}
};
threads.add(producer);
producer.start();
// This is the slow consumer.
ConsumerThread c1 = new ConsumerThread("Consumer-1");
threads.add(c1);
c1.start();
// Wait a bit so that the slow consumer gets assigned most of the messages.
Thread.sleep(500);
ConsumerThread c2 = new ConsumerThread("Consumer-2");
threads.add(c2);
c2.start();
int totalReceived = 0;
for ( int i=0; i < 30; i++) {
Thread.sleep(1000);
long c1Counter = c1.counter.getAndSet(0);
long c2Counter = c2.counter.getAndSet(0);
log.debug("c1: "+c1Counter+", c2: "+c2Counter);
totalReceived += c1Counter;
totalReceived += c2Counter;
// Once message have been flowing for a few seconds, start asserting that c2 always gets messages. It should be receiving about 100 / sec
if( i > 10 ) {
assertTrue("Total received=" + totalReceived + ", Consumer 2 should be receiving new messages every second.", c2Counter > 0);
}
}
}
public void produce(int count) throws Exception {
Connection connection=null;
try {
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(ACTIVEMQ_BROKER_URI);
factory.setDispatchAsync(true);
connection = factory.createConnection();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
MessageProducer producer = session.createProducer(destination);
connection.start();
for( int i=0 ; i< count; i++ ) {
producer.send(session.createTextMessage(getName()+" Message "+(++i)));
}
} finally {
try {
connection.close();
} catch (Throwable e) {
}
}
}
public class ConsumerThread extends Thread {
final AtomicLong counter = new AtomicLong();
public ConsumerThread(String threadId) {
super(threadId);
}
public void run() {
Connection connection=null;
try {
log.debug(getName() + ": is running");
ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(ACTIVEMQ_BROKER_URI);
factory.setDispatchAsync(true);
connection = factory.createConnection();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
MessageConsumer consumer = session.createConsumer(destination);
connection.start();
while (!shutdown.get()) {
TextMessage msg = (TextMessage)consumer.receive(1000);
if ( msg!=null ) {
int sleepingTime;
if (getName().equals("Consumer-1")) {
sleepingTime = 1000 * 1000;
} else {
sleepingTime = 1;
}
counter.incrementAndGet();
Thread.sleep(sleepingTime);
}
}
} catch (Exception e) {
} finally {
log.debug(getName() + ": is stopping");
try {
connection.close();
} catch (Throwable e) {
}
}
}
}
}

View File

@@ -1,35 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.bugs;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.LevelDBStore;
import org.junit.Ignore;
//Ignored because there are now exceptions thrown on send when the broker is
//shutdown which cause the test to fail and need to be accounted for
//The parent test is also excluded in the pom.xml currently and not run
@Ignore
public class AMQ2149LevelDBTest extends AMQ2149Test {
@Override
protected void configurePersistenceAdapter(BrokerService brokerService) throws Exception {
LevelDBStore persistenceFactory = new LevelDBStore();
persistenceFactory.setDirectory(dataDirFile);
brokerService.setPersistenceAdapter(persistenceFactory);
}
}

View File

@@ -320,7 +320,6 @@ public class AMQ2149Test {
}
}
// attempt to simply replicate leveldb failure. no joy yet
public void x_testRestartReReceive() throws Exception {
createBroker(new Configurer() {
public void configure(BrokerService broker) throws Exception {

View File

@@ -66,10 +66,8 @@ public class AMQ2584Test extends org.apache.activemq.TestSupport {
@Parameterized.Parameters(name="{0}")
public static Collection<TestSupport.PersistenceAdapterChoice[]> getTestParameters() {
TestSupport.PersistenceAdapterChoice[] kahaDb = {TestSupport.PersistenceAdapterChoice.KahaDB};
TestSupport.PersistenceAdapterChoice[] levelDb = {TestSupport.PersistenceAdapterChoice.LevelDB};
List<TestSupport.PersistenceAdapterChoice[]> choices = new ArrayList<TestSupport.PersistenceAdapterChoice[]>();
choices.add(kahaDb);
choices.add(levelDb);
return choices;
}

View File

@@ -42,7 +42,6 @@ import org.apache.activemq.ActiveMQSession;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTopic;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter;
import org.apache.activemq.store.kahadb.disk.journal.DataFile;
@@ -249,9 +248,6 @@ public class AMQ2832Test {
startBroker();
PersistenceAdapter pa = broker.getPersistenceAdapter();
if (pa instanceof LevelDBStore) {
return;
}
ActiveMQQueue queue = new ActiveMQQueue("MyQueue");
ActiveMQQueue disposable = new ActiveMQQueue("MyDisposableQueue");

View File

@@ -64,12 +64,8 @@ public class AMQ2870Test extends org.apache.activemq.TestSupport {
String osName = System.getProperty("os.name");
LOG.info("Running on [" + osName + "]");
PersistenceAdapterChoice[] kahaDb = {PersistenceAdapterChoice.KahaDB};
PersistenceAdapterChoice[] levelDb = {PersistenceAdapterChoice.LevelDB};
List<PersistenceAdapterChoice[]> choices = new ArrayList<PersistenceAdapterChoice[]>();
choices.add(kahaDb);
if (!osName.equalsIgnoreCase("AIX") && !osName.equalsIgnoreCase("SunOS")) {
choices.add(levelDb);
}
return choices;
}
@@ -191,8 +187,6 @@ public class AMQ2870Test extends org.apache.activemq.TestSupport {
properties.put("maxFileLength", maxFileLengthVal);
properties.put("cleanupInterval", "2000");
properties.put("checkpointInterval", "2000");
// leveldb
properties.put("logSize", maxFileLengthVal);
IntrospectionSupport.setProperties(persistenceAdapter, properties);

View File

@@ -1,38 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.bugs;
import java.io.File;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.leveldb.LevelDBStore;
public class AMQ4485LowLimitLevelDBTest extends AMQ4485LowLimitTest {
public AMQ4485LowLimitLevelDBTest() {
super();
numBrokers = 2;
}
protected BrokerService createBroker(int brokerid, boolean addToNetwork) throws Exception {
BrokerService broker = super.createBroker(brokerid, addToNetwork);
LevelDBStore levelDBStore = new LevelDBStore();
levelDBStore.setDirectory(new File(broker.getBrokerDataDirectory(),"levelDB"));
broker.setPersistenceAdapter(levelDBStore);
return broker;
}
}

View File

@@ -1,184 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.bugs;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FilenameFilter;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import javax.jms.*;
import javax.management.ObjectName;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.region.policy.PolicyEntry;
import org.apache.activemq.broker.region.policy.PolicyMap;
import org.apache.activemq.leveldb.LevelDBStore;
import org.apache.activemq.leveldb.LevelDBStoreViewMBean;
import org.apache.activemq.util.Wait;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AMQ4677Test {
private static final transient Logger LOG = LoggerFactory.getLogger(AMQ4677Test.class);
private static BrokerService brokerService;
@Rule public TestName name = new TestName();
private File dataDirFile;
@Before
public void setUp() throws Exception {
dataDirFile = new File("target/LevelDBCleanupTest");
brokerService = new BrokerService();
brokerService.setBrokerName("LevelDBBroker");
brokerService.setPersistent(true);
brokerService.setUseJmx(true);
brokerService.setAdvisorySupport(false);
brokerService.setDeleteAllMessagesOnStartup(true);
brokerService.setDataDirectoryFile(dataDirFile);
LevelDBStore persistenceFactory = new LevelDBStore();
persistenceFactory.setDirectory(dataDirFile);
brokerService.setPersistenceAdapter(persistenceFactory);
brokerService.start();
brokerService.waitUntilStarted();
}
@After
public void tearDown() throws Exception {
brokerService.stop();
brokerService.waitUntilStopped();
}
@Test
public void testSendAndReceiveAllMessages() throws Exception {
ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory("vm://LevelDBBroker");
Connection connection = connectionFactory.createConnection();
connection.setClientID(getClass().getName());
connection.start();
final Session session = connection.createSession(true, Session.AUTO_ACKNOWLEDGE);
Destination destination = session.createQueue(name.toString());
MessageProducer producer = session.createProducer(destination);
producer.setDeliveryMode(DeliveryMode.PERSISTENT);
final LevelDBStoreViewMBean levelDBView = getLevelDBStoreMBean();
assertNotNull(levelDBView);
levelDBView.compact();
final int SIZE = 10 * 1024;
final int MSG_COUNT = 30000; // very slow consuming 60k messages of size 30k
final CountDownLatch done = new CountDownLatch(MSG_COUNT);
byte buffer[] = new byte[SIZE];
for (int i = 0; i < SIZE; ++i) {
buffer[i] = (byte) 128;
}
for (int i = 0; i < MSG_COUNT; ++i) {
BytesMessage message = session.createBytesMessage();
message.writeBytes(buffer);
producer.send(message);
if ((i % 1000) == 0) {
LOG.info("Sent message #{}", i);
session.commit();
}
}
session.commit();
LOG.info("Finished sending all messages.");
MessageConsumer consumer = session.createConsumer(destination);
consumer.setMessageListener(new MessageListener() {
@Override
public void onMessage(Message message) {
if ((done.getCount() % 1000) == 0) {
try {
LOG.info("Received message #{}", MSG_COUNT - done.getCount());
session.commit();
} catch (JMSException e) {
}
}
done.countDown();
}
});
done.await(15, TimeUnit.MINUTES);
session.commit();
LOG.info("Finished receiving all messages.");
assertTrue("Should < 3 logfiles left.", Wait.waitFor(new Wait.Condition() {
@Override
public boolean isSatisified() throws Exception {
levelDBView.compact();
return countLogFiles() < 3;
}
}, TimeUnit.MINUTES.toMillis(5), (int)TimeUnit.SECONDS.toMillis(30)));
levelDBView.compact();
LOG.info("Current number of logs {}", countLogFiles());
}
protected long countLogFiles() {
String[] logFiles = dataDirFile.list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (name.endsWith("log")) {
return true;
}
return false;
}
});
LOG.info("Current number of logs {}", logFiles.length);
return logFiles.length;
}
protected LevelDBStoreViewMBean getLevelDBStoreMBean() throws Exception {
ObjectName levelDbViewMBeanQuery = new ObjectName(
"org.apache.activemq:type=Broker,brokerName=LevelDBBroker,service=PersistenceAdapter,instanceName=LevelDB*");
Set<ObjectName> names = brokerService.getManagementContext().queryNames(null, levelDbViewMBeanQuery);
if (names.isEmpty() || names.size() > 1) {
throw new java.lang.IllegalStateException("Can't find levelDB store name.");
}
LevelDBStoreViewMBean proxy = (LevelDBStoreViewMBean) brokerService.getManagementContext()
.newProxyInstance(names.iterator().next(), LevelDBStoreViewMBean.class, true);
return proxy;
}
}

View File

@@ -91,7 +91,6 @@ public class AMQ5266SingleDestTest {
public static Iterable<Object[]> parameters() {
return Arrays.asList(new Object[][]{
{1000, 40, 40, 1024*1024*1, true, TestSupport.PersistenceAdapterChoice.KahaDB, false},
{1000, 40, 40, 1024*1024*1, true, TestSupport.PersistenceAdapterChoice.LevelDB, false},
{1000, 40, 40, 1024*1024*1, true, TestSupport.PersistenceAdapterChoice.JDBC, false},
});
}

View File

@@ -96,11 +96,9 @@ public class AMQ5266StarvedConsumerTest {
public static Iterable<Object[]> parameters() {
return Arrays.asList(new Object[][]{
{1000, 40, 5, 1024*1024, false, TestSupport.PersistenceAdapterChoice.KahaDB, true},
{1000, 40, 5, 1024*1024, false, TestSupport.PersistenceAdapterChoice.LevelDB, true},
{1000, 40, 5, 1024*1024, false, TestSupport.PersistenceAdapterChoice.JDBC, true},
{500, 20, 20, 1024*1024, false, TestSupport.PersistenceAdapterChoice.KahaDB, true},
{500, 20, 20, 1024*1024, false, TestSupport.PersistenceAdapterChoice.LevelDB, true},
{500, 20, 20, 1024*1024, false, TestSupport.PersistenceAdapterChoice.JDBC, true},
});
}

Some files were not shown because too many files have changed in this diff.