mirror of https://github.com/apache/activemq.git
Refactor the scheduler store into a more KahaDB-style store that can recover from problems such as missing journal files or index corruption, and that can rebuild its index when needed. Make the scheduler store configurable so that users can plug in their own implementations. Upgrades from legacy store versions are automatic.
This commit is contained in:
parent aa79c7ec7b
commit 74846bb2b4
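Since this change makes the scheduler store pluggable, here is a brief, hedged sketch of supplying a custom implementation. The setJobSchedulerStore setter on BrokerService and the MyJobSchedulerStore class are assumptions for illustration; the hunks below only show the broker-side lookup logic.

    import java.io.File;

    import org.apache.activemq.broker.BrokerService;
    import org.apache.activemq.broker.scheduler.JobSchedulerStore;

    public class CustomSchedulerStoreExample {
        public static void main(String[] args) throws Exception {
            BrokerService broker = new BrokerService();
            broker.setSchedulerSupport(true);

            // MyJobSchedulerStore is a hypothetical user-supplied implementation
            // of the JobSchedulerStore interface documented later in this commit.
            JobSchedulerStore store = new MyJobSchedulerStore();
            store.setDirectory(new File("activemq-data/scheduler"));
            broker.setJobSchedulerStore(store); // assumed setter on BrokerService
            broker.start();
        }
    }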
@@ -1866,6 +1866,23 @@ public class BrokerService implements Service {
             return null;
         }
 
+        try {
+            PersistenceAdapter pa = getPersistenceAdapter();
+            if (pa != null) {
+                this.jobSchedulerStore = pa.createJobSchedulerStore();
+                jobSchedulerStore.setDirectory(getSchedulerDirectoryFile());
+                configureService(jobSchedulerStore);
+                jobSchedulerStore.start();
+                return this.jobSchedulerStore;
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } catch (UnsupportedOperationException ex) {
+            // It's ok if the store doesn't implement a scheduler.
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
         try {
             PersistenceAdapter pa = getPersistenceAdapter();
             if (pa != null && pa instanceof JobSchedulerStore) {
@@ -1877,9 +1894,13 @@ public class BrokerService implements Service {
             throw new RuntimeException(e);
         }
 
+        // Load the KahaDB store as a last resort, this only works if KahaDB is
+        // included at runtime, otherwise this will fail.  User should disable
+        // scheduler support if this fails.
         try {
-            String clazz = "org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl";
-            jobSchedulerStore = (JobSchedulerStore) getClass().getClassLoader().loadClass(clazz).newInstance();
+            String clazz = "org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter";
+            PersistenceAdapter adaptor = (PersistenceAdapter) getClass().getClassLoader().loadClass(clazz).newInstance();
+            jobSchedulerStore = adaptor.createJobSchedulerStore();
             jobSchedulerStore.setDirectory(getSchedulerDirectoryFile());
             configureService(jobSchedulerStore);
             jobSchedulerStore.start();
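The net effect of the two hunks above: the broker first asks its configured persistence adapter for a scheduler store, and only falls back to loading KahaDB reflectively when the adapter provides none. A hedged usage sketch; the getJobSchedulerStore accessor name is inferred from the hunk context, and "JMS" as the default scheduler name is an assumption:

    BrokerService broker = new BrokerService();
    broker.setSchedulerSupport(true);
    broker.start();

    // Returns the adapter-provided store when available, otherwise the
    // reflectively loaded KahaDB store.
    JobSchedulerStore store = broker.getJobSchedulerStore();
    JobScheduler scheduler = store.getJobScheduler("JMS");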
@@ -16,23 +16,39 @@
  */
 package org.apache.activemq.broker.jmx;
 
-import javax.management.openmbean.*;
-import java.io.IOException;
-import java.util.List;
+import java.util.List;
+
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.TabularData;
+import javax.management.openmbean.TabularDataSupport;
+import javax.management.openmbean.TabularType;
+
+import org.apache.activemq.broker.jmx.OpenTypeSupport.OpenTypeFactory;
+import org.apache.activemq.broker.scheduler.Job;
+import org.apache.activemq.broker.scheduler.JobScheduler;
+import org.apache.activemq.broker.scheduler.JobSupport;
 
+/**
+ * MBean object that can be used to manage a single instance of a JobScheduler. The object
+ * provides methods for querying for jobs and removing some or all of the jobs that are
+ * scheduled in the managed store.
+ */
 public class JobSchedulerView implements JobSchedulerViewMBean {
 
     private final JobScheduler jobScheduler;
 
+    /**
+     * Creates a new instance of the JobScheduler management MBean.
+     *
+     * @param jobScheduler
+     *        The scheduler instance to manage.
+     */
     public JobSchedulerView(JobScheduler jobScheduler) {
         this.jobScheduler = jobScheduler;
     }
 
+    @Override
     public TabularData getAllJobs() throws Exception {
         OpenTypeFactory factory = OpenTypeSupport.getFactory(Job.class);
         CompositeType ct = factory.getCompositeType();
@@ -45,6 +61,7 @@ public class JobSchedulerView implements JobSchedulerViewMBean {
         return rc;
     }
 
+    @Override
     public TabularData getAllJobs(String startTime, String finishTime) throws Exception {
         OpenTypeFactory factory = OpenTypeSupport.getFactory(Job.class);
         CompositeType ct = factory.getCompositeType();
@@ -59,6 +76,7 @@ public class JobSchedulerView implements JobSchedulerViewMBean {
         return rc;
     }
 
+    @Override
     public TabularData getNextScheduleJobs() throws Exception {
         OpenTypeFactory factory = OpenTypeSupport.getFactory(Job.class);
         CompositeType ct = factory.getCompositeType();
@@ -71,31 +89,51 @@ public class JobSchedulerView implements JobSchedulerViewMBean {
         return rc;
     }
 
+    @Override
     public String getNextScheduleTime() throws Exception {
         long time = this.jobScheduler.getNextScheduleTime();
         return JobSupport.getDateTime(time);
     }
 
+    @Override
     public void removeAllJobs() throws Exception {
         this.jobScheduler.removeAllJobs();
-
     }
 
+    @Override
     public void removeAllJobs(String startTime, String finishTime) throws Exception {
         long start = JobSupport.getDataTime(startTime);
         long finish = JobSupport.getDataTime(finishTime);
         this.jobScheduler.removeAllJobs(start, finish);
-
     }
 
+    @Override
+    public void removeAllJobsAtScheduledTime(String time) throws Exception {
+        long removeAtTime = JobSupport.getDataTime(time);
+        this.jobScheduler.remove(removeAtTime);
+    }
+
+    @Override
+    public void removeJobAtScheduledTime(String time) throws Exception {
+        removeAllJobsAtScheduledTime(time);
+    }
+
+    @Override
     public void removeJob(String jobId) throws Exception {
         this.jobScheduler.remove(jobId);
-
     }
 
-    public void removeJobAtScheduledTime(String time) throws IOException {
-        // TODO Auto-generated method stub
+    @Override
+    public int getExecutionCount(String jobId) throws Exception {
+        int result = 0;
+
+        List<Job> jobs = this.jobScheduler.getAllJobs();
+        for (Job job : jobs) {
+            if (job.getJobId().equals(jobId)) {
+                result = job.getExecutionCount();
+            }
+        }
+
+        return result;
+    }
 
 }
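A short sketch of exercising the view directly (outside of JMX); store stands for any started JobSchedulerStore, and "JMS" as the scheduler name is an assumption:

    JobScheduler scheduler = store.getJobScheduler("JMS");
    JobSchedulerView view = new JobSchedulerView(scheduler);

    TabularData all = view.getAllJobs();
    System.out.println("scheduled jobs: " + all.size());
    System.out.println("next fire time: " + view.getNextScheduleTime());
    System.out.println("job:1 ran " + view.getExecutionCount("job:1") + " time(s)");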
@@ -18,76 +18,125 @@ package org.apache.activemq.broker.jmx;
 
 import javax.management.openmbean.TabularData;
 
 public interface JobSchedulerViewMBean {
 
     /**
-     * remove all jobs scheduled to run at this time
+     * Remove all jobs scheduled to run at this time. If there are no jobs scheduled
+     * at the given time this method returns without making any modifications to the
+     * scheduler store.
      *
      * @param time
-     * @throws Exception
+     *        the string formatted time that should be used to remove jobs.
+     *
+     * @throws Exception if an error occurs while performing the remove.
+     *
+     * @deprecated use removeAllJobsAtScheduledTime instead as it is more explicit about what
+     *             the method is actually doing.
      */
+    @Deprecated
     @MBeanInfo("remove jobs with matching execution time")
     public abstract void removeJobAtScheduledTime(@MBeanInfo("time: yyyy-MM-dd hh:mm:ss")String time) throws Exception;
 
+    /**
+     * Remove all jobs scheduled to run at this time. If there are no jobs scheduled
+     * at the given time this method returns without making any modifications to the
+     * scheduler store.
+     *
+     * @param time
+     *        the string formatted time that should be used to remove jobs.
+     *
+     * @throws Exception if an error occurs while performing the remove.
+     */
+    @MBeanInfo("remove jobs with matching execution time")
+    public abstract void removeAllJobsAtScheduledTime(@MBeanInfo("time: yyyy-MM-dd hh:mm:ss")String time) throws Exception;
+
     /**
-     * remove a job with the matching jobId
+     * Remove a job with the matching jobId. If the method does not find a matching job
+     * then it returns without throwing an error or making any modifications to the job
+     * scheduler store.
      *
      * @param jobId
-     * @throws Exception
+     *        the Job Id to remove from the scheduler store.
+     *
+     * @throws Exception if an error occurs while attempting to remove the Job.
      */
     @MBeanInfo("remove jobs with matching jobId")
     public abstract void removeJob(@MBeanInfo("jobId")String jobId) throws Exception;
 
     /**
-     * remove all the Jobs from the scheduler
-     * @throws Exception
+     * Remove all the Jobs from the scheduler.
+     *
+     * @throws Exception if an error occurs while purging the store.
      */
     @MBeanInfo("remove all scheduled jobs")
     public abstract void removeAllJobs() throws Exception;
 
     /**
-     * remove all the Jobs from the scheduler that are due between the start and finish times
-     * @param start time
-     * @param finish time
-     * @throws Exception
+     * Remove all the Jobs from the scheduler that are due between the start and finish times.
+     *
+     * @param start
+     *        the starting time to remove jobs from.
+     * @param finish
+     *        the finish time for the remove operation.
+     *
+     * @throws Exception if an error occurs while attempting to remove the jobs.
      */
     @MBeanInfo("remove all scheduled jobs between time ranges ")
     public abstract void removeAllJobs(@MBeanInfo("start: yyyy-MM-dd hh:mm:ss")String start,@MBeanInfo("finish: yyyy-MM-dd hh:mm:ss")String finish) throws Exception;
 
     /**
-     * Get the next time jobs will be fired
-     * @return the time in milliseconds
-     * @throws Exception
+     * Get the next time jobs will be fired from this scheduler store.
+     *
+     * @return the time in milliseconds of the next job to execute.
+     *
+     * @throws Exception if an error occurs while accessing the store.
      */
     @MBeanInfo("get the next time a job is due to be scheduled ")
     public abstract String getNextScheduleTime() throws Exception;
 
+    /**
+     * Gets the number of times a scheduled Job has been executed.
+     *
+     * @return the total number of times a scheduled job has executed.
+     *
+     * @throws Exception if an error occurs while querying for the Job.
+     */
+    @MBeanInfo("get the next time a job is due to be scheduled ")
+    public abstract int getExecutionCount(@MBeanInfo("jobId")String jobId) throws Exception;
+
     /**
-     * Get all the jobs scheduled to run next
+     * Get all the jobs scheduled to run next.
+     *
      * @return a list of jobs that will be scheduled next
-     * @throws Exception
+     *
+     * @throws Exception if an error occurs while reading the scheduler store.
      */
     @MBeanInfo("get the next job(s) to be scheduled. Not HTML friendly ")
     public abstract TabularData getNextScheduleJobs() throws Exception;
 
     /**
-     * Get all the outstanding Jobs
-     * @return a table of all jobs
-     * @throws Exception
+     * Get all the outstanding Jobs that are scheduled in this scheduler store.
+     *
+     * @return a table of all jobs in this scheduler store.
+     *
+     * @throws Exception if an error occurs while reading the store.
      */
     @MBeanInfo("get the scheduled Jobs in the Store. Not HTML friendly ")
     public abstract TabularData getAllJobs() throws Exception;
 
     /**
-     * Get all outstanding jobs due to run between start and finish
-     * @param start
-     * @param finish
-     * @return a table of jobs in the range
-     * @throws Exception
+     * Get all outstanding jobs due to run between start and finish time range.
+     *
+     * @param start
+     *        the starting time range to query the store for jobs.
+     * @param finish
+     *        the ending time of this query for scheduled jobs.
+     *
+     * @return a table of jobs in the range given.
+     *
+     * @throws Exception if an error occurs while querying the scheduler store.
      */
     @MBeanInfo("get the scheduled Jobs in the Store within the time range. Not HTML friendly ")
     public abstract TabularData getAllJobs(@MBeanInfo("start: yyyy-MM-dd hh:mm:ss")String start,@MBeanInfo("finish: yyyy-MM-dd hh:mm:ss")String finish)throws Exception;
 
 }
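For remote management, a hedged sketch of driving this MBean over JMX; the JMX service URL and the ObjectName pattern are assumptions that vary by broker configuration and are not taken from this commit:

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
        MBeanServerConnection conn = jmxc.getMBeanServerConnection();
        // The registration pattern below is a guess at the view's ObjectName.
        ObjectName name = new ObjectName(
            "org.apache.activemq:type=Broker,brokerName=localhost,service=JobScheduler,name=JMS");
        JobSchedulerViewMBean view = JMX.newMBeanProxy(conn, name, JobSchedulerViewMBean.class);
        view.removeAllJobs("2014-01-01 00:00:00", "2014-01-31 23:59:59");
    }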
@@ -16,7 +16,12 @@
  */
 package org.apache.activemq.broker.scheduler;
 
+/**
+ * Interface for a scheduled Job object.
+ *
+ * Each Job is identified by a unique Job Id which can be used to reference the Job
+ * in the Job Scheduler store for updates or removal.
+ */
 public interface Job {
 
     /**

@@ -38,11 +43,12 @@ public interface Job {
      * @return the Delay
      */
     public abstract long getDelay();
 
     /**
      * @return the period
      */
     public abstract long getPeriod();
-
     /**
      * @return the cron entry
      */

@@ -52,17 +58,24 @@ public interface Job {
      * @return the payload
      */
     public abstract byte[] getPayload();
 
-
     /**
      * Get the start time as a Date time string
      * @return the date time
      */
     public String getStartTime();
-
     /**
      * Get the time the job is next due to execute
      * @return the date time
      */
     public String getNextExecutionTime();
 
+    /**
+     * Gets the total number of times this job has executed.
+     *
+     * @return the number of times this job has been executed.
+     */
+    public int getExecutionCount();
+
 }
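To make the accessor set concrete, a minimal sketch that dumps every scheduled job; scheduler is a JobScheduler obtained from the store:

    List<Job> jobs = scheduler.getAllJobs();
    for (Job job : jobs) {
        System.out.printf("%s start=%s next=%s period=%dms executions=%d%n",
            job.getJobId(), job.getStartTime(), job.getNextExecutionTime(),
            job.getPeriod(), job.getExecutionCount());
    }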
@@ -18,13 +18,21 @@ package org.apache.activemq.broker.scheduler;
 
 import org.apache.activemq.util.ByteSequence;
 
+/**
+ * Job event listener interface. Provides event points for Job related events
+ * such as job ready events.
+ */
 public interface JobListener {
 
     /**
-     * A Job that has been scheduled is now ready
-     * @param id
+     * A Job that has been scheduled is now ready to be fired. The Job is passed
+     * in its raw byte form and must be un-marshaled before being delivered.
+     *
+     * @param jobId
+     *        The unique Job Id of the Job that is ready to fire.
+     * @param job
+     *        The job that is now ready, delivered in byte form.
      */
-    public void scheduledJob(String id,ByteSequence job);
+    public void scheduledJob(String id, ByteSequence job);
 
 }
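As a usage sketch, a listener that simply logs ready jobs; a real listener would un-marshal the payload before delivering it:

    public class LoggingJobListener implements JobListener {

        @Override
        public void scheduledJob(String id, ByteSequence job) {
            // The payload arrives in raw marshaled form.
            System.out.println("Job ready to fire: " + id + " (" + job.getLength() + " bytes)");
        }
    }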
@@ -46,20 +46,25 @@ public interface JobScheduler {
     void stopDispatching() throws Exception;
 
     /**
-     * Add a Job listener
+     * Add a Job listener which will receive events related to scheduled jobs.
      *
-     * @param l
-     * @throws Exception
+     * @param listener
+     *        The job listener to add.
+     *
+     * @throws Exception
      */
-    void addListener(JobListener l) throws Exception;
+    void addListener(JobListener listener) throws Exception;
 
     /**
-     * remove a JobListener
+     * remove a JobListener that was previously registered. If the given listener is not in
+     * the registry this method has no effect.
      *
-     * @param l
-     * @throws Exception
+     * @param listener
+     *        The listener that should be removed from the listener registry.
+     *
+     * @throws Exception
      */
-    void removeListener(JobListener l) throws Exception;
+    void removeListener(JobListener listener) throws Exception;
 
     /**
      * Add a job to be scheduled

@@ -70,7 +75,8 @@ public interface JobScheduler {
      *            the message to be sent when the job is scheduled
      * @param delay
      *            the time in milliseconds before the job will be run
-     * @throws Exception
+     *
+     * @throws Exception if an error occurs while scheduling the Job.
      */
     void schedule(String jobId, ByteSequence payload, long delay) throws Exception;
 

@@ -82,8 +88,9 @@ public interface JobScheduler {
      * @param payload
      *            the message to be sent when the job is scheduled
      * @param cronEntry
-     *            - cron entry
-     * @throws Exception
+     *            The cron entry to use to schedule this job.
+     *
+     * @throws Exception if an error occurs while scheduling the Job.
      */
     void schedule(String jobId, ByteSequence payload, String cronEntry) throws Exception;
 

@@ -95,7 +102,7 @@ public interface JobScheduler {
      * @param payload
      *            the message to be sent when the job is scheduled
      * @param cronEntry
-     *            - cron entry
+     *            cron entry
      * @param delay
      *            time in ms to wait before scheduling
      * @param period

@@ -110,6 +117,8 @@ public interface JobScheduler {
      * remove all jobs scheduled to run at this time
      *
      * @param time
+     *        The UTC time to use to remove a batch of scheduled Jobs.
+     *
      * @throws Exception
      */
     void remove(long time) throws Exception;

@@ -118,7 +127,9 @@ public interface JobScheduler {
      * remove a job with the matching jobId
      *
      * @param jobId
-     * @throws Exception
+     *        The unique Job Id to search for and remove from the scheduled set of jobs.
+     *
+     * @throws Exception if an error occurs while removing the Job.
      */
     void remove(String jobId) throws Exception;
 
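A brief usage sketch of the two scheduling variants documented above; the payload here is an arbitrary byte array, whereas the broker normally passes a marshaled message:

    byte[] data = "fire!".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    ByteSequence payload = new ByteSequence(data);

    // Run once, 60 seconds from now.
    scheduler.schedule("job:1", payload, 60000);

    // Run on a cron schedule: every day at 02:00.
    scheduler.schedule("job:2", payload, "0 2 * * *");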
@@ -21,6 +21,12 @@ import java.util.List;
 
 import org.apache.activemq.util.ByteSequence;
 
+/**
+ * A wrapper for instances of the JobScheduler interface that ensures that methods
+ * provide safe and sane return values and can deal with null values being passed
+ * in etc. Provides a measure of safety when using unknown implementations of the
+ * JobSchedulerStore which might not always do the right thing.
+ */
 public class JobSchedulerFacade implements JobScheduler {
 
     private final SchedulerBroker broker;
@@ -26,13 +26,56 @@ import org.apache.activemq.Service;
  */
 public interface JobSchedulerStore extends Service {
 
+    /**
+     * Gets the location where the Job Scheduler will write the persistent data used
+     * to preserve and recover scheduled Jobs.
+     *
+     * If the scheduler implementation does not utilize a file system based store this
+     * method returns null.
+     *
+     * @return the directory where persistent store data is written.
+     */
     File getDirectory();
 
+    /**
+     * Sets the directory where persistent store data will be written. This method
+     * must be called before the scheduler store is started to have any effect.
+     *
+     * @param directory
+     *        The directory where the job scheduler store is to be located.
+     */
     void setDirectory(File directory);
 
+    /**
+     * The size of the current store on disk if the store utilizes a disk based store
+     * mechanism.
+     *
+     * @return the current store size on disk.
+     */
     long size();
 
+    /**
+     * Returns the JobScheduler instance identified by the given name.
+     *
+     * @param name
+     *        the name of the JobScheduler instance to lookup.
+     *
+     * @return the named JobScheduler or null if none exists with the given name.
+     *
+     * @throws Exception if an error occurs while loading the named scheduler.
+     */
     JobScheduler getJobScheduler(String name) throws Exception;
 
+    /**
+     * Removes the named JobScheduler if it exists, purging all scheduled messages
+     * assigned to it.
+     *
+     * @param name
+     *        the name of the scheduler instance to remove.
+     *
+     * @return true if there was a scheduler with the given name to remove.
+     *
+     * @throws Exception if an error occurs while removing the scheduler.
+     */
     boolean removeJobScheduler(String name) throws Exception;
 }
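A short, hedged sketch of the lifecycle this interface implies, assuming some concrete implementation behind persistenceAdapter:

    JobSchedulerStore store = persistenceAdapter.createJobSchedulerStore();
    store.setDirectory(new File("activemq-data/scheduler")); // must precede start()
    store.start();
    try {
        JobScheduler scheduler = store.getJobScheduler("JMS"); // name is an assumption
        System.out.println("store size on disk: " + store.size() + " bytes");
    } finally {
        store.stop();
    }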
@@ -20,7 +20,11 @@ import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 
+/**
+ * A class to provide common Job Scheduler related methods.
+ */
 public class JobSupport {
 
     public static String getDateTime(long value) {
         DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
         Date date = new Date(value);

@@ -32,5 +36,4 @@ public class JobSupport {
         Date date = dfm.parse(value);
         return date.getTime();
     }
-
 }
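For reference, round-tripping a timestamp through these helpers; note that the string-to-time method really is named getDataTime in this class, and it can throw because parsing may fail:

    long now = System.currentTimeMillis();
    String text = JobSupport.getDateTime(now);   // e.g. "2013-05-17 14:03:22"
    long parsed = JobSupport.getDataTime(text);  // back to epoch millis (second precision)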
@@ -22,6 +22,7 @@ import java.util.Set;
 
 import org.apache.activemq.Service;
 import org.apache.activemq.broker.ConnectionContext;
+import org.apache.activemq.broker.scheduler.JobSchedulerStore;
 import org.apache.activemq.command.ActiveMQDestination;
 import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.command.ActiveMQTopic;
@@ -31,74 +32,99 @@ import org.apache.activemq.usage.SystemUsage;
 /**
  * Adapter to the actual persistence mechanism used with ActiveMQ
  *
  */
 public interface PersistenceAdapter extends Service {
 
     /**
-     * Returns a set of all the {@link org.apache.activemq.command.ActiveMQDestination}
-     * objects that the persistence store is aware exist.
+     * Returns a set of all the
+     * {@link org.apache.activemq.command.ActiveMQDestination} objects that the
+     * persistence store is aware exist.
      *
      * @return active destinations
      */
     Set<ActiveMQDestination> getDestinations();
 
     /**
-     * Factory method to create a new queue message store with the given destination name
+     * Factory method to create a new queue message store with the given
+     * destination name
      *
      * @param destination
      * @return the message store
      * @throws IOException
      */
     MessageStore createQueueMessageStore(ActiveMQQueue destination) throws IOException;
 
     /**
-     * Factory method to create a new topic message store with the given destination name
-     * @param destination
+     * Factory method to create a new topic message store with the given
+     * destination name
+     *
+     * @param destination
      * @return the topic message store
      * @throws IOException
      */
     TopicMessageStore createTopicMessageStore(ActiveMQTopic destination) throws IOException;
 
+    /**
+     * Creates and returns a new Job Scheduler store instance.
+     *
+     * @return a new JobSchedulerStore instance if this Persistence adapter provides its own.
+     *
+     * @throws IOException If an error occurs while creating the new JobSchedulerStore.
+     * @throws UnsupportedOperationException If this adapter does not provide its own
+     *         scheduler store implementation.
+     */
+    JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException;
+
     /**
      * Cleanup method to remove any state associated with the given destination.
      * This method does not stop the message store (it might not be cached).
-     * @param destination Destination to forget
+     *
+     * @param destination
+     *        Destination to forget
      */
     void removeQueueMessageStore(ActiveMQQueue destination);
 
     /**
      * Cleanup method to remove any state associated with the given destination
      * This method does not stop the message store (it might not be cached).
-     * @param destination Destination to forget
+     *
+     * @param destination
+     *        Destination to forget
      */
     void removeTopicMessageStore(ActiveMQTopic destination);
 
     /**
-     * Factory method to create a new persistent prepared transaction store for XA recovery
+     * Factory method to create a new persistent prepared transaction store for
+     * XA recovery
+     *
      * @return transaction store
      * @throws IOException
      */
     TransactionStore createTransactionStore() throws IOException;
 
     /**
-     * This method starts a transaction on the persistent storage - which is nothing to
-     * do with JMS or XA transactions - its purely a mechanism to perform multiple writes
-     * to a persistent store in 1 transaction as a performance optimization.
+     * This method starts a transaction on the persistent storage - which is
+     * nothing to do with JMS or XA transactions - it's purely a mechanism to
+     * perform multiple writes to a persistent store in 1 transaction as a
+     * performance optimization.
      * <p/>
-     * Typically one transaction will require one disk synchronization point and so for
-     * real high performance its usually faster to perform many writes within the same
-     * transaction to minimize latency caused by disk synchronization. This is especially
-     * true when using tools like Berkeley Db or embedded JDBC servers.
-     * @param context
-     * @throws IOException
+     * Typically one transaction will require one disk synchronization point and
+     * so for real high performance it's usually faster to perform many writes
+     * within the same transaction to minimize latency caused by disk
+     * synchronization. This is especially true when using tools like Berkeley
+     * Db or embedded JDBC servers.
+     *
+     * @param context
+     * @throws IOException
      */
     void beginTransaction(ConnectionContext context) throws IOException;
 
     /**
      * Commit a persistence transaction
-     * @param context
-     * @throws IOException
+     *
+     * @param context
+     * @throws IOException
+     *
+     * @see PersistenceAdapter#beginTransaction(ConnectionContext context)
      */
@@ -106,40 +132,45 @@ public interface PersistenceAdapter extends Service {
 
     /**
      * Rollback a persistence transaction
-     * @param context
-     * @throws IOException
+     *
+     * @param context
+     * @throws IOException
+     *
+     * @see PersistenceAdapter#beginTransaction(ConnectionContext context)
      */
     void rollbackTransaction(ConnectionContext context) throws IOException;
 
     /**
      *
      * @return last broker sequence
      * @throws IOException
      */
     long getLastMessageBrokerSequenceId() throws IOException;
 
     /**
-     * Delete's all the messages in the persistent store.
+     * Deletes all the messages in the persistent store.
      *
      * @throws IOException
      */
     void deleteAllMessages() throws IOException;
 
     /**
-     * @param usageManager The UsageManager that is controlling the broker's memory usage.
+     * @param usageManager
+     *        The UsageManager that is controlling the broker's memory
+     *        usage.
      */
     void setUsageManager(SystemUsage usageManager);
 
     /**
      * Set the name of the broker using the adapter
+     *
      * @param brokerName
      */
     void setBrokerName(String brokerName);
 
     /**
      * Set the directory where any data files should be created
+     *
      * @param dir
      */
     void setDirectory(File dir);
@@ -148,26 +179,30 @@ public interface PersistenceAdapter extends Service {
      * @return the directory used by the persistence adaptor
      */
     File getDirectory();
 
     /**
      * checkpoint any
-     * @param sync
-     * @throws IOException
+     *
+     * @param sync
+     * @throws IOException
      */
     void checkpoint(boolean sync) throws IOException;
 
     /**
      * A hint to return the size of the store on disk
+     *
      * @return disk space used in bytes or 0 if not implemented
      */
     long size();
 
     /**
-     * return the last stored producer sequenceId for this producer Id
-     * used to suppress duplicate sends on failover reconnect at the transport
-     * when a reconnect occurs
-     * @param id the producerId to find a sequenceId for
+     * return the last stored producer sequenceId for this producer Id used to
+     * suppress duplicate sends on failover reconnect at the transport when a
+     * reconnect occurs
+     *
+     * @param id
+     *        the producerId to find a sequenceId for
      * @return the last stored sequence id or -1 if no suppression needed
      */
     long getLastProducerSequenceId(ProducerId id) throws IOException;
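A hedged sketch of how callers are expected to treat the new factory method, mirroring the BrokerService logic at the top of this commit:

    JobSchedulerStore store = null;
    try {
        store = persistenceAdapter.createJobSchedulerStore();
    } catch (UnsupportedOperationException e) {
        // This adapter provides no scheduler store of its own; fall back to
        // another store or disable scheduler support.
    }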
@@ -24,6 +24,7 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.activemq.broker.ConnectionContext;
+import org.apache.activemq.broker.scheduler.JobSchedulerStore;
 import org.apache.activemq.command.ActiveMQDestination;
 import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.command.ActiveMQTopic;

@@ -39,7 +40,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * @org.apache.xbean.XBean
  *
  */
 public class MemoryPersistenceAdapter implements PersistenceAdapter {
     private static final Logger LOG = LoggerFactory.getLogger(MemoryPersistenceAdapter.class);

@@ -49,6 +50,7 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
     ConcurrentHashMap<ActiveMQDestination, MessageStore> queues = new ConcurrentHashMap<ActiveMQDestination, MessageStore>();
     private boolean useExternalMessageReferences;
 
+    @Override
     public Set<ActiveMQDestination> getDestinations() {
         Set<ActiveMQDestination> rc = new HashSet<ActiveMQDestination>(queues.size() + topics.size());
         for (Iterator<ActiveMQDestination> iter = queues.keySet().iterator(); iter.hasNext();) {

@@ -64,6 +66,7 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
         return new MemoryPersistenceAdapter();
     }
 
+    @Override
     public MessageStore createQueueMessageStore(ActiveMQQueue destination) throws IOException {
         MessageStore rc = queues.get(destination);
         if (rc == null) {

@@ -76,6 +79,7 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
         return rc;
     }
 
+    @Override
     public TopicMessageStore createTopicMessageStore(ActiveMQTopic destination) throws IOException {
         TopicMessageStore rc = topics.get(destination);
         if (rc == null) {

@@ -93,6 +97,7 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
      *
      * @param destination Destination to forget
      */
+    @Override
     public void removeQueueMessageStore(ActiveMQQueue destination) {
         queues.remove(destination);
     }

@@ -102,10 +107,12 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
      *
      * @param destination Destination to forget
      */
+    @Override
     public void removeTopicMessageStore(ActiveMQTopic destination) {
         topics.remove(destination);
     }
 
+    @Override
     public TransactionStore createTransactionStore() throws IOException {
         if (transactionStore == null) {
             transactionStore = new MemoryTransactionStore(this);

@@ -113,25 +120,32 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
         return transactionStore;
     }
 
+    @Override
     public void beginTransaction(ConnectionContext context) {
     }
 
+    @Override
     public void commitTransaction(ConnectionContext context) {
     }
 
+    @Override
     public void rollbackTransaction(ConnectionContext context) {
     }
 
+    @Override
     public void start() throws Exception {
     }
 
+    @Override
     public void stop() throws Exception {
     }
 
+    @Override
     public long getLastMessageBrokerSequenceId() throws IOException {
         return 0;
     }
 
+    @Override
     public void deleteAllMessages() throws IOException {
         for (Iterator<TopicMessageStore> iter = topics.values().iterator(); iter.hasNext();) {
             MemoryMessageStore store = asMemoryMessageStore(iter.next());

@@ -177,38 +191,52 @@ public class MemoryPersistenceAdapter implements PersistenceAdapter {
      * @param usageManager The UsageManager that is controlling the broker's
      *                     memory usage.
      */
+    @Override
     public void setUsageManager(SystemUsage usageManager) {
     }
 
+    @Override
     public String toString() {
         return "MemoryPersistenceAdapter";
     }
 
+    @Override
     public void setBrokerName(String brokerName) {
     }
 
+    @Override
     public void setDirectory(File dir) {
     }
 
+    @Override
     public File getDirectory(){
         return null;
     }
 
+    @Override
     public void checkpoint(boolean sync) throws IOException {
     }
 
+    @Override
     public long size(){
         return 0;
     }
 
     public void setCreateTransactionStore(boolean create) throws IOException {
         if (create) {
             createTransactionStore();
         }
     }
 
+    @Override
     public long getLastProducerSequenceId(ProducerId id) {
         // memory map does duplicate suppression
         return -1;
     }
 
+    @Override
+    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
+        // We could eventually implement an in memory scheduler.
+        throw new UnsupportedOperationException();
+    }
 }
@@ -61,8 +61,9 @@ public final class IOHelper {
     }
 
     /**
-     * Converts any string into a string that is safe to use as a file name.
-     * The result will only include ascii characters and numbers, and the "-","_", and "." characters.
+     * Converts any string into a string that is safe to use as a file name. The
+     * result will only include ascii characters and numbers, and the "-","_",
+     * and "." characters.
      *
      * @param name
      * @return

@@ -76,15 +77,16 @@ public final class IOHelper {
     }
 
     /**
-     * Converts any string into a string that is safe to use as a file name.
-     * The result will only include ascii characters and numbers, and the "-","_", and "." characters.
+     * Converts any string into a string that is safe to use as a file name. The
+     * result will only include ascii characters and numbers, and the "-","_",
+     * and "." characters.
      *
      * @param name
     * @param dirSeparators
     * @param maxFileLength
     * @return
     */
-    public static String toFileSystemSafeName(String name,boolean dirSeparators,int maxFileLength) {
+    public static String toFileSystemSafeName(String name, boolean dirSeparators, int maxFileLength) {
         int size = name.length();
         StringBuffer rc = new StringBuffer(size * 2);
         for (int i = 0; i < size; i++) {

@@ -92,8 +94,7 @@ public final class IOHelper {
             boolean valid = c >= 'a' && c <= 'z';
             valid = valid || (c >= 'A' && c <= 'Z');
             valid = valid || (c >= '0' && c <= '9');
-            valid = valid || (c == '_') || (c == '-') || (c == '.') || (c=='#')
-                ||(dirSeparators && ( (c == '/') || (c == '\\')));
+            valid = valid || (c == '_') || (c == '-') || (c == '.') || (c == '#') || (dirSeparators && ((c == '/') || (c == '\\')));
 
             if (valid) {
                 rc.append(c);

@@ -105,7 +106,7 @@ public final class IOHelper {
         }
         String result = rc.toString();
         if (result.length() > maxFileLength) {
-            result = result.substring(result.length()-maxFileLength,result.length());
+            result = result.substring(result.length() - maxFileLength, result.length());
         }
         return result;
     }

@@ -168,8 +169,7 @@ public final class IOHelper {
         } else {
             for (int i = 0; i < files.length; i++) {
                 File file = files[i];
-                if (file.getName().equals(".")
-                    || file.getName().equals("..")) {
+                if (file.getName().equals(".") || file.getName().equals("..")) {
                     continue;
                 }
                 if (file.isDirectory()) {

@@ -190,6 +190,27 @@ public final class IOHelper {
         }
     }
 
+    public static void moveFiles(File srcDirectory, File targetDirectory, FilenameFilter filter) throws IOException {
+        if (!srcDirectory.isDirectory()) {
+            throw new IOException("source is not a directory");
+        }
+
+        if (targetDirectory.exists() && !targetDirectory.isDirectory()) {
+            throw new IOException("target exists and is not a directory");
+        } else {
+            mkdirs(targetDirectory);
+        }
+
+        List<File> filesToMove = new ArrayList<File>();
+        getFiles(srcDirectory, filesToMove, filter);
+
+        for (File file : filesToMove) {
+            if (!file.isDirectory()) {
+                moveFile(file, targetDirectory);
+            }
+        }
+    }
+
     public static void copyFile(File src, File dest) throws IOException {
         copyFile(src, dest, null);
     }

@@ -222,32 +243,32 @@ public final class IOHelper {
         File parent = src.getParentFile();
         String fromPath = from.getAbsolutePath();
         if (parent.getAbsolutePath().equals(fromPath)) {
-            //one level down
+            // one level down
             result = to;
-        }else {
+        } else {
             String parentPath = parent.getAbsolutePath();
             String path = parentPath.substring(fromPath.length());
-            result = new File(to.getAbsolutePath()+File.separator+path);
+            result = new File(to.getAbsolutePath() + File.separator + path);
         }
         return result;
     }
 
-    static List<File> getFiles(File dir,FilenameFilter filter){
+    static List<File> getFiles(File dir, FilenameFilter filter) {
         List<File> result = new ArrayList<File>();
-        getFiles(dir,result,filter);
+        getFiles(dir, result, filter);
         return result;
     }
 
-    static void getFiles(File dir,List<File> list,FilenameFilter filter) {
+    static void getFiles(File dir, List<File> list, FilenameFilter filter) {
         if (!list.contains(dir)) {
             list.add(dir);
-            String[] fileNames=dir.list(filter);
-            for (int i =0; i < fileNames.length;i++) {
-                File f = new File(dir,fileNames[i]);
+            String[] fileNames = dir.list(filter);
+            for (int i = 0; i < fileNames.length; i++) {
+                File f = new File(dir, fileNames[i]);
                 if (f.isFile()) {
                     list.add(f);
-                }else {
-                    getFiles(dir,list,filter);
+                } else {
+                    getFiles(dir, list, filter);
                 }
             }
         }

@@ -286,12 +307,13 @@ public final class IOHelper {
     public static void mkdirs(File dir) throws IOException {
         if (dir.exists()) {
             if (!dir.isDirectory()) {
-                throw new IOException("Failed to create directory '" + dir +"', regular file already existed with that name");
+                throw new IOException("Failed to create directory '" + dir +
+                                      "', regular file already existed with that name");
             }
 
         } else {
             if (!dir.mkdirs()) {
-                throw new IOException("Failed to create directory '" + dir+"'");
+                throw new IOException("Failed to create directory '" + dir + "'");
             }
         }
     }
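The new IOHelper.moveFiles helper is what enables the automatic legacy-store migration mentioned in the commit message; a hedged usage sketch (the directory names and the ".db" filter are illustrative only):

    File legacyDir = new File("activemq-data/scheduler");
    File archiveDir = new File("activemq-data/scheduler/legacyStore");
    IOHelper.moveFiles(legacyDir, archiveDir, new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".db");
        }
    });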
@@ -34,6 +34,7 @@ import org.apache.activemq.ActiveMQMessageAudit;
 import org.apache.activemq.broker.BrokerService;
 import org.apache.activemq.broker.ConnectionContext;
 import org.apache.activemq.broker.Locker;
+import org.apache.activemq.broker.scheduler.JobSchedulerStore;
 import org.apache.activemq.command.ActiveMQDestination;
 import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.command.ActiveMQTopic;

@@ -422,6 +423,7 @@ public class JDBCPersistenceAdapter extends DataSourceServiceSupport implements
         this.lockDataSource = dataSource;
     }
 
+    @Override
     public BrokerService getBrokerService() {
         return brokerService;
     }

@@ -846,4 +848,9 @@ public class JDBCPersistenceAdapter extends DataSourceServiceSupport implements
         }
         return result;
     }
+
+    @Override
+    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
+        throw new UnsupportedOperationException();
+    }
 }
@@ -31,6 +31,7 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+
 import org.apache.activeio.journal.InvalidRecordLocationException;
 import org.apache.activeio.journal.Journal;
 import org.apache.activeio.journal.JournalEventListener;

@@ -40,6 +41,7 @@ import org.apache.activeio.packet.Packet;
 import org.apache.activemq.broker.BrokerService;
 import org.apache.activemq.broker.BrokerServiceAware;
 import org.apache.activemq.broker.ConnectionContext;
+import org.apache.activemq.broker.scheduler.JobSchedulerStore;
 import org.apache.activemq.command.ActiveMQDestination;
 import org.apache.activemq.command.ActiveMQQueue;
 import org.apache.activemq.command.ActiveMQTopic;

@@ -78,14 +80,14 @@ import org.slf4j.LoggerFactory;
  * An implementation of {@link PersistenceAdapter} designed for use with a
  * {@link Journal} and then check pointing asynchronously on a timeout with some
  * other long term persistent storage.
  *
  * @org.apache.xbean.XBean
  *
  */
 public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEventListener, UsageListener, BrokerServiceAware {
 
+    private BrokerService brokerService;
+
     protected Scheduler scheduler;
     private static final Logger LOG = LoggerFactory.getLogger(JournalPersistenceAdapter.class);

@@ -118,9 +120,9 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
     private TaskRunnerFactory taskRunnerFactory;
     private File directory;
 
     public JournalPersistenceAdapter() {
     }
 
     public JournalPersistenceAdapter(Journal journal, PersistenceAdapter longTermPersistence, TaskRunnerFactory taskRunnerFactory) throws IOException {
         setJournal(journal);
         setTaskRunnerFactory(taskRunnerFactory);

@@ -135,13 +137,14 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         this.journal = journal;
         journal.setJournalEventListener(this);
     }
 
     public void setPersistenceAdapter(PersistenceAdapter longTermPersistence) {
         this.longTermPersistence = longTermPersistence;
     }
 
     final Runnable createPeriodicCheckpointTask() {
         return new Runnable() {
+            @Override
             public void run() {
                 long lastTime = 0;
                 synchronized (this) {

@@ -158,11 +161,13 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
      * @param usageManager The UsageManager that is controlling the
      *                     destination's memory usage.
      */
+    @Override
     public void setUsageManager(SystemUsage usageManager) {
         this.usageManager = usageManager;
         longTermPersistence.setUsageManager(usageManager);
     }
 
+    @Override
     public Set<ActiveMQDestination> getDestinations() {
         Set<ActiveMQDestination> destinations = new HashSet<ActiveMQDestination>(longTermPersistence.getDestinations());
         destinations.addAll(queues.keySet());

@@ -178,6 +183,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         }
     }
 
+    @Override
     public MessageStore createQueueMessageStore(ActiveMQQueue destination) throws IOException {
         JournalMessageStore store = queues.get(destination);
         if (store == null) {

@@ -188,6 +194,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         return store;
     }
 
+    @Override
     public TopicMessageStore createTopicMessageStore(ActiveMQTopic destinationName) throws IOException {
         JournalTopicMessageStore store = topics.get(destinationName);
         if (store == null) {

@@ -203,6 +210,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
      *
      * @param destination Destination to forget
      */
+    @Override
     public void removeQueueMessageStore(ActiveMQQueue destination) {
         queues.remove(destination);
     }

@@ -212,30 +220,37 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
      *
      * @param destination Destination to forget
      */
+    @Override
     public void removeTopicMessageStore(ActiveMQTopic destination) {
         topics.remove(destination);
     }
 
+    @Override
     public TransactionStore createTransactionStore() throws IOException {
         return transactionStore;
     }
 
+    @Override
     public long getLastMessageBrokerSequenceId() throws IOException {
         return longTermPersistence.getLastMessageBrokerSequenceId();
     }
 
+    @Override
     public void beginTransaction(ConnectionContext context) throws IOException {
         longTermPersistence.beginTransaction(context);
     }
 
+    @Override
     public void commitTransaction(ConnectionContext context) throws IOException {
         longTermPersistence.commitTransaction(context);
     }
 
+    @Override
     public void rollbackTransaction(ConnectionContext context) throws IOException {
         longTermPersistence.rollbackTransaction(context);
     }
 
+    @Override
     public synchronized void start() throws Exception {
         if (!started.compareAndSet(false, true)) {
             return;

@@ -246,12 +261,14 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         }
 
         checkpointTask = taskRunnerFactory.createTaskRunner(new Task() {
+            @Override
             public boolean iterate() {
                 return doCheckpoint();
             }
         }, "ActiveMQ Journal Checkpoint Worker");
 
         checkpointExecutor = new ThreadPoolExecutor(maxCheckpointWorkers, maxCheckpointWorkers, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
+            @Override
             public Thread newThread(Runnable runable) {
                 Thread t = new Thread(runable, "Journal checkpoint worker");
                 t.setPriority(7);

@@ -279,6 +296,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
 
     }
 
+    @Override
     public void stop() throws Exception {
 
         this.usageManager.getMemoryUsage().removeUsageListener(this);

@@ -330,16 +348,17 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
     /**
      * The Journal gives us a call back so that we can move old data out of the
      * journal. Taking a checkpoint does this for us.
      *
      * @see org.apache.activemq.journal.JournalEventListener#overflowNotification(org.apache.activemq.journal.RecordLocation)
      */
+    @Override
     public void overflowNotification(RecordLocation safeLocation) {
         checkpoint(false, true);
     }
 
     /**
      * When we checkpoint we move all the journalled data to long term storage.
      *
      */
     public void checkpoint(boolean sync, boolean fullCheckpoint) {
         try {

@@ -369,13 +388,14 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         }
     }
 
+    @Override
     public void checkpoint(boolean sync) {
         checkpoint(sync, sync);
     }
 
     /**
      * This does the actual checkpoint.
      *
      * @return
      */
     public boolean doCheckpoint() {

@@ -398,7 +418,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
             // We do many partial checkpoints (fullCheckpoint==false) to move
             // topic messages
             // to long term store as soon as possible.
             //
             // We want to avoid doing that for queue messages since removes that
             // come in the same
             // checkpoint cycle will nullify the previous message add.

@@ -411,6 +431,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
                 try {
                     final JournalMessageStore ms = iterator.next();
                     FutureTask<RecordLocation> task = new FutureTask<RecordLocation>(new Callable<RecordLocation>() {
+                        @Override
                         public RecordLocation call() throws Exception {
                             return ms.checkpoint();
                         }

@@ -428,6 +449,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
                 try {
                     final JournalTopicMessageStore ms = iterator.next();
                     FutureTask<RecordLocation> task = new FutureTask<RecordLocation>(new Callable<RecordLocation>() {
+                        @Override
                         public RecordLocation call() throws Exception {
                             return ms.checkpoint();
                         }

@@ -505,7 +527,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
     /**
      * Move all the messages that were in the journal into long term storage. We
      * just replay and do a checkpoint.
      *
      * @throws IOException
      * @throws InvalidRecordLocationException

@@ -644,11 +666,11 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
     public RecordLocation writeCommand(DataStructure command, boolean sync) throws IOException {
         if (started.get()) {
             try {
                 return journal.write(toPacket(wireFormat.marshal(command)), sync);
             } catch (IOException ioe) {
                 LOG.error("Cannot write to the journal", ioe);
                 brokerService.handleIOException(ioe);
                 throw ioe;
             }
         }
         throw new IOException("closed");

@@ -660,6 +682,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         return writeCommand(trace, sync);
     }
 
+    @Override
     public void onUsageChanged(Usage usage, int oldPercentUsage, int newPercentUsage) {
         newPercentUsage = (newPercentUsage / 10) * 10;
         oldPercentUsage = (oldPercentUsage / 10) * 10;

@@ -673,6 +696,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         return transactionStore;
     }
 
+    @Override
     public void deleteAllMessages() throws IOException {
         try {
             JournalTrace trace = new JournalTrace();

@@ -735,6 +759,7 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         return new ByteSequence(sequence.getData(), sequence.getOffset(), sequence.getLength());
     }
 
+    @Override
     public void setBrokerName(String brokerName) {
         longTermPersistence.setBrokerName(brokerName);
     }

@@ -744,18 +769,22 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         return "JournalPersistenceAdapter(" + longTermPersistence + ")";
     }
 
+    @Override
     public void setDirectory(File dir) {
         this.directory=dir;
     }
 
+    @Override
     public File getDirectory(){
         return directory;
     }
 
+    @Override
     public long size(){
         return 0;
     }
 
+    @Override
     public void setBrokerService(BrokerService brokerService) {
         this.brokerService = brokerService;
         PersistenceAdapter pa = getLongTermPersistence();

@@ -764,8 +793,14 @@ public class JournalPersistenceAdapter implements PersistenceAdapter, JournalEve
         }
     }
 
+    @Override
     public long getLastProducerSequenceId(ProducerId id) {
         return -1;
     }
 
+    @Override
+    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
+        return longTermPersistence.createJobSchedulerStore();
+    }
+
 }
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.activemq.store.kahadb;
+
+import org.apache.activemq.store.kahadb.disk.journal.Location;
+import org.apache.activemq.store.kahadb.disk.page.Page;
+
+public abstract class AbstractKahaDBMetaData<T> implements KahaDBMetaData<T> {
+
+    private int state;
+    private Location lastUpdateLocation;
+    private Page<T> page;
+
+    @Override
+    public Page<T> getPage() {
+        return page;
+    }
+
+    @Override
+    public int getState() {
+        return state;
+    }
+
+    @Override
+    public Location getLastUpdateLocation() {
+        return lastUpdateLocation;
+    }
+
+    @Override
+    public void setPage(Page<T> page) {
+        this.page = page;
+    }
+
+    @Override
+    public void setState(int value) {
+        this.state = value;
+    }
+
+    @Override
+    public void setLastUpdateLocation(Location location) {
+        this.lastUpdateLocation = location;
+    }
+}
@ -0,0 +1,745 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.activemq.store.kahadb;

import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.activemq.broker.LockableServiceSupport;
import org.apache.activemq.broker.Locker;
import org.apache.activemq.store.SharedFileLocker;
import org.apache.activemq.store.kahadb.data.KahaEntryType;
import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
import org.apache.activemq.store.kahadb.disk.journal.Journal;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.PageFile;
import org.apache.activemq.store.kahadb.disk.page.Transaction;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.ServiceStopper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public abstract class AbstractKahaDBStore extends LockableServiceSupport {

    static final Logger LOG = LoggerFactory.getLogger(AbstractKahaDBStore.class);

    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);

    protected File directory;
    protected PageFile pageFile;
    protected Journal journal;
    protected AtomicLong journalSize = new AtomicLong(0);
    protected boolean failIfDatabaseIsLocked;
    protected long checkpointInterval = 5*1000;
    protected long cleanupInterval = 30*1000;
    protected boolean checkForCorruptJournalFiles = false;
    protected boolean checksumJournalFiles = true;
    protected boolean forceRecoverIndex = false;
    protected int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
    protected int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
    protected boolean archiveCorruptedIndex = false;
    protected boolean enableIndexWriteAsync = false;
    protected boolean enableJournalDiskSyncs = false;
    protected boolean deleteAllJobs = false;
    protected int indexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE;
    protected boolean useIndexLFRUEviction = false;
    protected float indexLFUEvictionFactor = 0.2f;
    protected boolean ignoreMissingJournalfiles = false;
    protected int indexCacheSize = 1000;
    protected boolean enableIndexDiskSyncs = true;
    protected boolean enableIndexRecoveryFile = true;
    protected boolean enableIndexPageCaching = true;
    protected boolean archiveDataLogs;
    protected boolean purgeStoreOnStartup;
    protected File directoryArchive;

    protected AtomicBoolean opened = new AtomicBoolean();
    protected Thread checkpointThread;
    protected final Object checkpointThreadLock = new Object();
    protected ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();
    protected ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();

    /**
     * @return the name to give this store's PageFile instance.
     */
    protected abstract String getPageFileName();

    /**
     * @return the location of the data directory if not set by configuration.
     */
    protected abstract File getDefaultDataDirectory();

    /**
     * Loads the store from disk.
     *
     * Based on configuration this method can either load an existing store or it can purge
     * an existing store and start in a clean state.
     *
     * @throws IOException if an error occurs during the load.
     */
    public abstract void load() throws IOException;

    /**
     * Unloads the state of the store to disk and shuts down all resources assigned to this
     * KahaDB store implementation.
     *
     * @throws IOException if an error occurs during the store unload.
     */
    public abstract void unload() throws IOException;

    @Override
    protected void doStart() throws Exception {
        this.indexLock.writeLock().lock();
        if (getDirectory() == null) {
            setDirectory(getDefaultDataDirectory());
        }
        IOHelper.mkdirs(getDirectory());
        try {
            if (isPurgeStoreOnStartup()) {
                getJournal().start();
                getJournal().delete();
                getJournal().close();
                journal = null;
                getPageFile().delete();
                LOG.info("{} Persistence store purged.", this);
                setPurgeStoreOnStartup(false);
            }

            load();
            store(new KahaTraceCommand().setMessage("LOADED " + new Date()));
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    @Override
    protected void doStop(ServiceStopper stopper) throws Exception {
        unload();
    }

    public PageFile getPageFile() {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public boolean isArchiveCorruptedIndex() {
        return archiveCorruptedIndex;
    }

    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
        this.archiveCorruptedIndex = archiveCorruptedIndex;
    }

    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }

    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }

    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }

    public long getCleanupInterval() {
        return cleanupInterval;
    }

    public void setCleanupInterval(long cleanupInterval) {
        this.cleanupInterval = cleanupInterval;
    }

    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }

    public boolean isForceRecoverIndex() {
        return forceRecoverIndex;
    }

    public void setForceRecoverIndex(boolean forceRecoverIndex) {
        this.forceRecoverIndex = forceRecoverIndex;
    }

    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    public void setJournalMaxFileLength(int journalMaxFileLength) {
        this.journalMaxFileLength = journalMaxFileLength;
    }

    public int getJournalMaxWriteBatchSize() {
        return journalMaxWriteBatchSize;
    }

    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
    }

    public boolean isEnableIndexWriteAsync() {
        return enableIndexWriteAsync;
    }

    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
        this.enableIndexWriteAsync = enableIndexWriteAsync;
    }

    public boolean isEnableJournalDiskSyncs() {
        return enableJournalDiskSyncs;
    }

    public void setEnableJournalDiskSyncs(boolean syncWrites) {
        this.enableJournalDiskSyncs = syncWrites;
    }

    public boolean isDeleteAllJobs() {
        return deleteAllJobs;
    }

    public void setDeleteAllJobs(boolean deleteAllJobs) {
        this.deleteAllJobs = deleteAllJobs;
    }

    /**
     * @return the archiveDataLogs
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directoryArchive
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    public int getIndexWriteBatchSize() {
        return indexWriteBatchSize;
    }

    public void setIndexWriteBatchSize(int indexWriteBatchSize) {
        this.indexWriteBatchSize = indexWriteBatchSize;
    }

    public boolean isUseIndexLFRUEviction() {
        return useIndexLFRUEviction;
    }

    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
        this.useIndexLFRUEviction = useIndexLFRUEviction;
    }

    public float getIndexLFUEvictionFactor() {
        return indexLFUEvictionFactor;
    }

    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
    }

    public boolean isEnableIndexDiskSyncs() {
        return enableIndexDiskSyncs;
    }

    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
    }

    public boolean isEnableIndexRecoveryFile() {
        return enableIndexRecoveryFile;
    }

    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
    }

    public boolean isEnableIndexPageCaching() {
        return enableIndexPageCaching;
    }

    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
        this.enableIndexPageCaching = enableIndexPageCaching;
    }

    public boolean isPurgeStoreOnStartup() {
        return this.purgeStoreOnStartup;
    }

    public void setPurgeStoreOnStartup(boolean purge) {
        this.purgeStoreOnStartup = purge;
    }

    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    public long size() {
        if (!isStarted()) {
            return 0;
        }
        try {
            return journalSize.get() + pageFile.getDiskSize();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public Locker createDefaultLocker() throws IOException {
        SharedFileLocker locker = new SharedFileLocker();
        locker.setDirectory(this.getDirectory());
        return locker;
    }

    @Override
    public void init() throws Exception {
    }

    /**
     * Store a command in the Journal and process it to update the Store index.
     *
     * @param command
     *      The specific JournalCommand to store and process.
     *
     * @return the Location where the data was written in the Journal.
     *
     * @throws IOException if an error occurs storing or processing the command.
     */
    public Location store(JournalCommand<?> command) throws IOException {
        return store(command, isEnableIndexDiskSyncs(), null, null, null);
    }

    /**
     * Store a command in the Journal and process it to update the Store index.
     *
     * @param command
     *      The specific JournalCommand to store and process.
     * @param sync
     *      Should the store operation be done synchronously (ignored if a completion callback is passed).
     *
     * @return the Location where the data was written in the Journal.
     *
     * @throws IOException if an error occurs storing or processing the command.
     */
    public Location store(JournalCommand<?> command, boolean sync) throws IOException {
        return store(command, sync, null, null, null);
    }

    /**
     * Store a command in the Journal and process it to update the Store index.
     *
     * @param command
     *      The specific JournalCommand to store and process.
     * @param onJournalStoreComplete
     *      The Runnable to call when the Journal write operation completes.
     *
     * @return the Location where the data was written in the Journal.
     *
     * @throws IOException if an error occurs storing or processing the command.
     */
    public Location store(JournalCommand<?> command, Runnable onJournalStoreComplete) throws IOException {
        return store(command, isEnableIndexDiskSyncs(), null, null, onJournalStoreComplete);
    }

    /**
     * Store a command in the Journal and process it to update the Store index.
     *
     * @param command
     *      The specific JournalCommand to store and process.
     * @param sync
     *      Should the store operation be done synchronously (ignored if a completion callback is passed).
     * @param before
     *      The Runnable instance to execute before performing the store and process operation.
     * @param after
     *      The Runnable instance to execute after performing the store and process operation.
     *
     * @return the Location where the data was written in the Journal.
     *
     * @throws IOException if an error occurs storing or processing the command.
     */
    public Location store(JournalCommand<?> command, boolean sync, Runnable before, Runnable after) throws IOException {
        return store(command, sync, before, after, null);
    }

    /**
     * All updates are funneled through this method. The updates are converted to a
     * JournalMessage which is logged to the journal and then the data from the JournalMessage
     * is used to update the index just like it would be done during a recovery process.
     *
     * @param command
     *      The specific JournalCommand to store and process.
     * @param sync
     *      Should the store operation be done synchronously (ignored if a completion callback is passed).
     * @param before
     *      The Runnable instance to execute before performing the store and process operation.
     * @param after
     *      The Runnable instance to execute after performing the store and process operation.
     * @param onJournalStoreComplete
     *      Callback to be run when the journal write operation is complete.
     *
     * @return the Location where the data was written in the Journal.
     *
     * @throws IOException if an error occurs storing or processing the command.
     */
    public Location store(JournalCommand<?> command, boolean sync, Runnable before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
        try {

            if (before != null) {
                before.run();
            }

            ByteSequence sequence = toByteSequence(command);
            Location location;
            checkpointLock.readLock().lock();
            try {

                long start = System.currentTimeMillis();
                location = onJournalStoreComplete == null ? journal.write(sequence, sync) :
                                                            journal.write(sequence, onJournalStoreComplete);
                long start2 = System.currentTimeMillis();

                process(command, location);

                long end = System.currentTimeMillis();
                if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
                    LOG.info("Slow KahaDB access: Journal append took: {} ms, Index update took {} ms",
                             (start2-start), (end-start2));
                }
            } finally {
                checkpointLock.readLock().unlock();
            }

            if (after != null) {
                after.run();
            }

            if (checkpointThread != null && !checkpointThread.isAlive()) {
                startCheckpoint();
            }
            return location;
        } catch (IOException ioe) {
            LOG.error("KahaDB failed to store to Journal", ioe);
            if (brokerService != null) {
                brokerService.handleIOException(ioe);
            }
            throw ioe;
        }
    }

    /**
     * Loads a previously stored JournalMessage.
     *
     * @param location
     *      The location of the journal command to read.
     *
     * @return a new un-marshaled JournalCommand instance.
     *
     * @throws IOException if an error occurs reading the stored command.
     */
    protected JournalCommand<?> load(Location location) throws IOException {
        ByteSequence data = journal.read(location);
        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
        byte readByte = is.readByte();
        KahaEntryType type = KahaEntryType.valueOf(readByte);
        if (type == null) {
            try {
                is.close();
            } catch (IOException e) {
            }
            throw new IOException("Could not load journal record. Invalid location: " + location);
        }
        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
        message.mergeFramed(is);
        return message;
    }

    /**
     * Process a stored or recovered JournalCommand instance and update the DB Index with the
     * state changes that this command produces. This can be called either as a new DB operation
     * or as a replay during recovery operations.
     *
     * @param command
     *      The JournalCommand to process.
     * @param location
     *      The location in the Journal where the command was written or read from.
     */
    protected abstract void process(JournalCommand<?> command, Location location) throws IOException;

    /**
     * Perform a checkpoint operation with optional cleanup.
     *
     * Called periodically by the checkpoint background thread to initiate a checkpoint operation;
     * if the cleanup flag is set, a cleanup sweep is also done to allow release of journal log
     * files that are no longer needed.
     *
     * @param cleanup
     *      Should the method do a simple checkpoint or also perform a journal cleanup.
     *
     * @throws IOException if an error occurs during the checkpoint operation.
     */
    protected void checkpointUpdate(final boolean cleanup) throws IOException {
        checkpointLock.writeLock().lock();
        try {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        checkpointUpdate(tx, cleanup);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }

        } finally {
            checkpointLock.writeLock().unlock();
        }
    }

    /**
     * Perform the checkpoint update operation. If the cleanup flag is true then the
     * operation should also purge any unused Journal log files.
     *
     * This method must always be called with the checkpoint and index write locks held.
     *
     * @param tx
     *      The TX under which to perform the checkpoint update.
     * @param cleanup
     *      Should the checkpoint also do unused Journal file cleanup.
     *
     * @throws IOException if an error occurs while performing the checkpoint.
     */
    protected abstract void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException;

    /**
     * Creates a new ByteSequence that represents the marshaled form of the given Journal Command.
     *
     * @param data
     *      The Journal Command that should be marshaled to bytes for writing.
     *
     * @return the byte representation of the given journal command.
     *
     * @throws IOException if an error occurs while serializing the command.
     */
    protected ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        return os.toByteSequence();
    }

    /**
     * Create the PageFile instance and configure it using the configuration options
     * currently set.
     *
     * @return the newly created and configured PageFile instance.
     */
    protected PageFile createPageFile() {
        PageFile index = new PageFile(getDirectory(), getPageFileName());
        index.setEnableWriteThread(isEnableIndexWriteAsync());
        index.setWriteBatchSize(getIndexWriteBatchSize());
        index.setPageCacheSize(getIndexCacheSize());
        index.setUseLFRUEviction(isUseIndexLFRUEviction());
        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
        index.setEnablePageCaching(isEnableIndexPageCaching());
        return index;
    }

    /**
     * Create a new Journal instance and configure it using the currently set configuration
     * options. If an archive directory is configured then this method will attempt to create
     * that directory if it does not already exist.
     *
     * @return the newly created and configured Journal instance.
     *
     * @throws IOException if an error occurs while creating the Journal object.
     */
    protected Journal createJournal() throws IOException {
        Journal manager = new Journal();
        manager.setDirectory(getDirectory());
        manager.setMaxFileLength(getJournalMaxFileLength());
        manager.setCheckForCorruptionOnStartup(isCheckForCorruptJournalFiles());
        manager.setChecksum(isChecksumJournalFiles() || isCheckForCorruptJournalFiles());
        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
        manager.setArchiveDataLogs(isArchiveDataLogs());
        manager.setSizeAccumulator(journalSize);
        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
        if (getDirectoryArchive() != null) {
            IOHelper.mkdirs(getDirectoryArchive());
            manager.setDirectoryArchive(getDirectoryArchive());
        }
        return manager;
    }

    /**
     * Starts the checkpoint Thread instance if not already running and not disabled
     * by configuration.
     */
    protected void startCheckpoint() {
        if (checkpointInterval == 0 && cleanupInterval == 0) {
            LOG.info("periodic checkpoint/cleanup disabled, will occur on clean shutdown/restart");
            return;
        }
        synchronized (checkpointThreadLock) {
            boolean start = false;
            if (checkpointThread == null) {
                start = true;
            } else if (!checkpointThread.isAlive()) {
                start = true;
                LOG.info("KahaDB: Recovering checkpoint thread after death");
            }
            if (start) {
                checkpointThread = new Thread("ActiveMQ Journal Checkpoint Worker") {
                    @Override
                    public void run() {
                        try {
                            long lastCleanup = System.currentTimeMillis();
                            long lastCheckpoint = System.currentTimeMillis();
                            // Sleep for a short time so we can periodically check
                            // to see if we need to exit this thread.
                            long sleepTime = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500);
                            while (opened.get()) {
                                Thread.sleep(sleepTime);
                                long now = System.currentTimeMillis();
                                if (cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval)) {
                                    checkpointCleanup(true);
                                    lastCleanup = now;
                                    lastCheckpoint = now;
                                } else if (checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval)) {
                                    checkpointCleanup(false);
                                    lastCheckpoint = now;
                                }
                            }
                        } catch (InterruptedException e) {
                            // Looks like someone really wants us to exit this thread...
                        } catch (IOException ioe) {
                            LOG.error("Checkpoint failed", ioe);
                            brokerService.handleIOException(ioe);
                        }
                    }
                };

                checkpointThread.setDaemon(true);
                checkpointThread.start();
            }
        }
    }

    /**
     * Called from the worker thread to start a checkpoint.
     *
     * This method ensures that the store is in an opened state and optionally logs information
     * related to slow store access times.
     *
     * @param cleanup
     *      Should a cleanup of the journal occur during the checkpoint operation.
     *
     * @throws IOException if an error occurs during the checkpoint operation.
     */
    protected void checkpointCleanup(final boolean cleanup) throws IOException {
        long start;
        this.indexLock.writeLock().lock();
        try {
            start = System.currentTimeMillis();
            if (!opened.get()) {
                return;
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        checkpointUpdate(cleanup);
        long end = System.currentTimeMillis();
        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
            LOG.info("Slow KahaDB access: cleanup took {}", (end - start));
        }
    }
}
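To make the template above concrete, here is a minimal sketch of what a subclass must provide. The class name, file names, and empty command handling are illustrative assumptions, not code from this commit; the real scheduler store fills these hooks with scheduler-specific index logic:

    // Illustrative subclass sketch (hypothetical names; not part of this commit).
    public class SimpleKahaDBStore extends AbstractKahaDBStore {

        @Override
        protected String getPageFileName() {
            return "simpleStore.data"; // name of the index PageFile in the directory
        }

        @Override
        protected File getDefaultDataDirectory() {
            return new File(IOHelper.getDefaultDataDirectory(), "simpleStore");
        }

        @Override
        public void load() throws IOException {
            this.indexLock.writeLock().lock();
            try {
                getJournal().start();  // the journal is the replay source for recovery
                getPageFile().load();  // bring the index pages online
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }

        @Override
        public void unload() throws IOException {
            if (pageFile != null && pageFile.isLoaded()) {
                checkpointUpdate(true); // final checkpoint plus journal cleanup
                pageFile.unload();
            }
            if (journal != null) {
                journal.close();
            }
        }

        @Override
        protected void process(JournalCommand<?> command, Location location) throws IOException {
            // Apply the logged command to the index; invoked for live store()
            // calls and again when records are replayed during recovery.
        }

        @Override
        protected void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
            // Flush index state; when cleanup is set, release journal data
            // files that no longer back any live index entry.
        }
    }

The important design point is that process() is the single index-update path, so a rebuilt index and a live index are produced by exactly the same code.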
@ -0,0 +1,135 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.store.kahadb;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Page;
import org.apache.activemq.store.kahadb.disk.page.Transaction;

/**
 * Interface for the store meta data used to hold the index value and other needed
 * information to manage a KahaDB store instance.
 */
public interface KahaDBMetaData<T> {

    /**
     * Indicates that this meta data instance has been opened and is active.
     */
    public static final int OPEN_STATE = 2;

    /**
     * Indicates that this meta data instance has been closed and is no longer active.
     */
    public static final int CLOSED_STATE = 1;

    /**
     * Gets the Page in the store PageFile where the KahaDBMetaData instance is stored.
     *
     * @return the Page used to begin accessing the KahaDBMetaData instance.
     */
    Page<T> getPage();

    /**
     * Sets the Page instance used to load and store the KahaDBMetaData instance.
     *
     * @param page
     *      the new Page value to use.
     */
    void setPage(Page<T> page);

    /**
     * Gets the state flag of this meta data instance.
     *
     * @return the current state value for this instance.
     */
    int getState();

    /**
     * Sets the current value of the state flag.
     *
     * @param value
     *      the new value to assign to the state flag.
     */
    void setState(int value);

    /**
     * Returns the Journal Location value that indicates the last recorded update
     * that was successfully performed for this KahaDB store implementation.
     *
     * @return the location of the last successful update location.
     */
    Location getLastUpdateLocation();

    /**
     * Updates the value of the last successful update.
     *
     * @param location
     *      the new value to assign the last update location field.
     */
    void setLastUpdateLocation(Location location);

    /**
     * For a newly created KahaDBMetaData instance this method is called to allow
     * the instance to create all of its internal indices and other state data.
     *
     * @param tx
     *      the Transaction instance under which the operation is executed.
     *
     * @throws IOException if an error occurs while creating the meta data structures.
     */
    void initialize(Transaction tx) throws IOException;

    /**
     * Instructs this object to load its internal data structures from the KahaDB PageFile
     * and prepare itself for use.
     *
     * @param tx
     *      the Transaction instance under which the operation is executed.
     *
     * @throws IOException if an error occurs while creating the meta data structures.
     */
    void load(Transaction tx) throws IOException;

    /**
     * Reads the serialized form of this object from the KahaDB PageFile and prepares it
     * for use. This method does not need to perform a full load of the meta data structures;
     * it only needs to read in the information necessary to load them from the PageFile on a
     * call to the load method.
     *
     * @param in
     *      the DataInput instance used to read this object's serialized form.
     *
     * @throws IOException if an error occurs while reading the serialized form.
     */
    void read(DataInput in) throws IOException;

    /**
     * Writes the object into a serialized form which can be read back in again using the
     * read method.
     *
     * @param out
     *      the DataOutput instance to use to write the current state to a serialized form.
     *
     * @throws IOException if an error occurs while serializing this instance.
     */
    void write(DataOutput out) throws IOException;

}
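A compact sketch of how an implementation typically pairs with AbstractKahaDBMetaData (added earlier in this commit). The index field, marshallers, and serialized layout below are assumptions for illustration; only the lifecycle shape (initialize/load for structures, read/write for the bootstrap record) comes from the interface:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
    import org.apache.activemq.store.kahadb.disk.page.PageFile;
    import org.apache.activemq.store.kahadb.disk.page.Transaction;
    import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
    import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;

    // Hypothetical meta data holder: one B-tree index keyed by name.
    public class SimpleMetaData extends AbstractKahaDBMetaData<SimpleMetaData> {

        private final PageFile pageFile;
        private BTreeIndex<String, Long> index;

        public SimpleMetaData(PageFile pageFile) {
            this.pageFile = pageFile;
        }

        @Override
        public void initialize(Transaction tx) throws IOException {
            // First start: allocate a fresh root page for the index.
            index = new BTreeIndex<String, Long>(pageFile, tx.allocate().getPageId());
        }

        @Override
        public void load(Transaction tx) throws IOException {
            index.setKeyMarshaller(StringMarshaller.INSTANCE);
            index.setValueMarshaller(LongMarshaller.INSTANCE);
            index.load(tx);
        }

        @Override
        public void read(DataInput in) throws IOException {
            // Read only enough to relocate the index; load(tx) does the real work.
            index = new BTreeIndex<String, Long>(pageFile, in.readLong());
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeLong(index.getPageId());
        }
    }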
@ -31,6 +31,7 @@ import org.apache.activemq.broker.LockableServiceSupport;
import org.apache.activemq.broker.Locker;
import org.apache.activemq.broker.jmx.AnnotatedMBean;
import org.apache.activemq.broker.jmx.PersistenceAdapterView;
import org.apache.activemq.broker.scheduler.JobSchedulerStore;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTopic;

@ -39,7 +40,14 @@ import org.apache.activemq.command.ProducerId;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.command.XATransactionId;
import org.apache.activemq.protobuf.Buffer;
import org.apache.activemq.store.*;
import org.apache.activemq.store.JournaledStore;
import org.apache.activemq.store.MessageStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.SharedFileLocker;
import org.apache.activemq.store.TopicMessageStore;
import org.apache.activemq.store.TransactionIdTransformer;
import org.apache.activemq.store.TransactionIdTransformerAware;
import org.apache.activemq.store.TransactionStore;
import org.apache.activemq.store.kahadb.data.KahaLocalTransactionId;
import org.apache.activemq.store.kahadb.data.KahaTransactionInfo;
import org.apache.activemq.store.kahadb.data.KahaXATransactionId;

@ -642,4 +650,9 @@ public class KahaDBPersistenceAdapter extends LockableServiceSupport implements
    public void setTransactionIdTransformer(TransactionIdTransformer transactionIdTransformer) {
        getStore().setTransactionIdTransformer(transactionIdTransformer);
    }

    @Override
    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
        return this.letter.createJobSchedulerStore();
    }
}
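With the adapter itself acting as the factory, wiring a scheduler store no longer requires naming KahaDB scheduler classes at the call site. A hedged usage sketch, assuming it runs inside a method declared throws Exception; the directory names and the scheduler name "JMS" are illustrative:

    // Usage sketch (hypothetical wiring, not code from this commit).
    KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
    adapter.setDirectory(new File("activemq-data", "kahadb"));

    JobSchedulerStore schedulerStore = adapter.createJobSchedulerStore();
    schedulerStore.setDirectory(new File("activemq-data", "scheduler"));
    schedulerStore.start(); // recovers scheduled jobs from the journal/index
    try {
        JobScheduler scheduler = schedulerStore.getJobScheduler("JMS");
        // schedule, query, and remove jobs through the JobScheduler API ...
    } finally {
        schedulerStore.stop();
    }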
@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.broker.region.Destination;
import org.apache.activemq.broker.region.RegionBroker;
import org.apache.activemq.broker.scheduler.JobSchedulerStore;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTempQueue;

@ -55,7 +56,14 @@ import org.apache.activemq.command.SubscriptionInfo;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.protobuf.Buffer;
import org.apache.activemq.store.*;
import org.apache.activemq.store.AbstractMessageStore;
import org.apache.activemq.store.ListenableFuture;
import org.apache.activemq.store.MessageRecoveryListener;
import org.apache.activemq.store.MessageStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.TopicMessageStore;
import org.apache.activemq.store.TransactionIdTransformer;
import org.apache.activemq.store.TransactionStore;
import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
import org.apache.activemq.store.kahadb.data.KahaDestination;
import org.apache.activemq.store.kahadb.data.KahaDestination.DestinationType;

@ -66,6 +74,7 @@ import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Transaction;
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
import org.apache.activemq.usage.MemoryUsage;
import org.apache.activemq.usage.SystemUsage;
import org.apache.activemq.util.ServiceStopper;

@ -172,6 +181,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
    public int getMaxAsyncJobs() {
        return this.maxAsyncJobs;
    }

    /**
     * @param maxAsyncJobs
     *            the maxAsyncJobs to set

@ -426,6 +436,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {

    }

    @Override
    public void updateMessage(Message message) throws IOException {
        if (LOG.isTraceEnabled()) {
            LOG.trace("updating: " + message.getMessageId() + " with deliveryCount: " + message.getRedeliveryCounter());

@ -472,7 +483,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
        indexLock.writeLock().lock();
        try {
            location = findMessageLocation(key, dest);
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
        if (location == null) {

@ -492,19 +503,17 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                @Override
                public Integer execute(Transaction tx) throws IOException {
                    // Iterate through all index entries to get a count
                    // of
                    // messages in the destination.
                    // of messages in the destination.
                    StoredDestination sd = getStoredDestination(dest, tx);
                    int rc = 0;
                    for (Iterator<Entry<Location, Long>> iterator = sd.locationIndex.iterator(tx); iterator
                            .hasNext();) {
                    for (Iterator<Entry<Location, Long>> iterator = sd.locationIndex.iterator(tx); iterator.hasNext();) {
                        iterator.next();
                        rc++;
                    }
                    return rc;
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    } finally {

@ -525,7 +534,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    return sd.locationIndex.isEmpty(tx);
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -552,12 +561,11 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                }
            }
        });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }


    @Override
    public void recoverNextMessages(final int maxReturned, final MessageRecoveryListener listener) throws Exception {
        indexLock.writeLock().lock();

@ -583,7 +591,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    sd.orderIndex.stoppedIterating();
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -628,7 +636,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
            });
        } catch (Exception e) {
            LOG.error("Failed to reset batching",e);
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -641,8 +649,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
        lockAsyncJobQueue();

        // Hopefully one day the page file supports concurrent read
        // operations... but for now we must
        // externally synchronize...
        // operations... but for now we must externally synchronize...

        indexLock.writeLock().lock();
        try {

@ -725,8 +732,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {

    @Override
    public void acknowledge(ConnectionContext context, String clientId, String subscriptionName,
            MessageId messageId, MessageAck ack)
            throws IOException {
            MessageId messageId, MessageAck ack) throws IOException {
        String subscriptionKey = subscriptionKey(clientId, subscriptionName).toString();
        if (isConcurrentStoreAndDispatchTopics()) {
            AsyncJobKey key = new AsyncJobKey(messageId, getDestination());

@ -810,7 +816,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    }
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }

@ -836,7 +842,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                        .getSubscriptionInfo().newInput()));
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -859,7 +865,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    return (int) getStoredMessageCount(tx, sd, subscriptionKey);
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -890,7 +896,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    sd.orderIndex.resetCursorPosition();
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -943,7 +949,7 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                    }
                }
            });
        }finally {
        } finally {
            indexLock.writeLock().unlock();
        }
    }

@ -1358,7 +1364,6 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
                LOG.warn("Failed to aquire lock", e);
            }
        }

        }

        @Override

@ -1422,7 +1427,11 @@ public class KahaDBStore extends MessageDatabase implements PersistenceAdapter {
            if (runnable instanceof StoreTask) {
                ((StoreTask)runnable).releaseLocks();
            }

        }
    }

    @Override
    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
        return new JobSchedulerStoreImpl();
    }
}
@ -16,12 +16,44 @@
 */
package org.apache.activemq.store.kahadb;

import org.apache.activemq.broker.*;
import org.apache.activemq.command.*;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.transaction.xa.Xid;

import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.BrokerServiceAware;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.broker.Lockable;
import org.apache.activemq.broker.LockableServiceSupport;
import org.apache.activemq.broker.Locker;
import org.apache.activemq.broker.scheduler.JobSchedulerStore;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTopic;
import org.apache.activemq.command.LocalTransactionId;
import org.apache.activemq.command.ProducerId;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.command.XATransactionId;
import org.apache.activemq.filter.AnyDestination;
import org.apache.activemq.filter.DestinationMap;
import org.apache.activemq.filter.DestinationMapEntry;
import org.apache.activemq.store.*;
import org.apache.activemq.store.MessageStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.SharedFileLocker;
import org.apache.activemq.store.TopicMessageStore;
import org.apache.activemq.store.TransactionIdTransformer;
import org.apache.activemq.store.TransactionIdTransformerAware;
import org.apache.activemq.store.TransactionStore;
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
import org.apache.activemq.usage.SystemUsage;
import org.apache.activemq.util.IOExceptionSupport;
import org.apache.activemq.util.IOHelper;

@ -30,13 +62,6 @@ import org.apache.activemq.util.ServiceStopper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.transaction.xa.Xid;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.*;

/**
 * An implementation of {@link org.apache.activemq.store.PersistenceAdapter} that supports
 * distribution of destinations across multiple kahaDB persistence adapters

@ -50,6 +75,7 @@ public class MultiKahaDBPersistenceAdapter extends LockableServiceSupport implem
    final int LOCAL_FORMAT_ID_MAGIC = Integer.valueOf(System.getProperty("org.apache.activemq.store.kahadb.MultiKahaDBTransactionStore.localXaFormatId", "61616"));

    final class DelegateDestinationMap extends DestinationMap {
        @Override
        public void setEntries(List<DestinationMapEntry> entries) {
            super.setEntries(entries);
        }

@ -252,7 +278,7 @@ public class MultiKahaDBPersistenceAdapter extends LockableServiceSupport implem
        }
        if (adapter instanceof PersistenceAdapter) {
            adapter.removeQueueMessageStore(destination);
            removeMessageStore((PersistenceAdapter)adapter, destination);
            removeMessageStore(adapter, destination);
            destinationMap.removeAll(destination);
        }
    }

@ -267,7 +293,7 @@ public class MultiKahaDBPersistenceAdapter extends LockableServiceSupport implem
        }
        if (adapter instanceof PersistenceAdapter) {
            adapter.removeTopicMessageStore(destination);
            removeMessageStore((PersistenceAdapter)adapter, destination);
            removeMessageStore(adapter, destination);
            destinationMap.removeAll(destination);
        }
    }

@ -453,6 +479,7 @@ public class MultiKahaDBPersistenceAdapter extends LockableServiceSupport implem
        }
    }

    @Override
    public BrokerService getBrokerService() {
        return brokerService;
    }

@ -503,4 +530,9 @@ public class MultiKahaDBPersistenceAdapter extends LockableServiceSupport implem
        locker.configure(this);
        return locker;
    }

    @Override
    public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
        return new JobSchedulerStoreImpl();
    }
}
@ -31,16 +31,24 @@ import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.MessageId;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.command.XATransactionId;
import org.apache.activemq.store.*;
import org.apache.activemq.store.AbstractMessageStore;
import org.apache.activemq.store.ListenableFuture;
import org.apache.activemq.store.MessageStore;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.ProxyMessageStore;
import org.apache.activemq.store.ProxyTopicMessageStore;
import org.apache.activemq.store.TopicMessageStore;
import org.apache.activemq.store.TransactionRecoveryListener;
import org.apache.activemq.store.TransactionStore;
import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
import org.apache.activemq.store.kahadb.data.KahaEntryType;
import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.store.kahadb.disk.journal.Journal;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


@ -186,6 +194,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
        return inflightTransactions.remove(txid);
    }

    @Override
    public void prepare(TransactionId txid) throws IOException {
        Tx tx = getTx(txid);
        for (TransactionStore store : tx.getStores()) {

@ -193,6 +202,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
        }
    }

    @Override
    public void commit(TransactionId txid, boolean wasPrepared, Runnable preCommit, Runnable postCommit)
            throws IOException {


@ -247,6 +257,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
        return location;
    }

    @Override
    public void rollback(TransactionId txid) throws IOException {
        Tx tx = removeTx(txid);
        if (tx != null) {

@ -256,6 +267,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
        }
    }

    @Override
    public void start() throws Exception {
        journal = new Journal() {
            @Override

@ -289,6 +301,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
        return new File(multiKahaDBPersistenceAdapter.getDirectory(), "txStore");
    }

    @Override
    public void stop() throws Exception {
        journal.close();
        journal = null;

@ -334,6 +347,7 @@ public class MultiKahaDBTransactionStore implements TransactionStore {
    }


    @Override
    public synchronized void recover(final TransactionRecoveryListener listener) throws IOException {

        for (final PersistenceAdapter adapter : multiKahaDBPersistenceAdapter.adapters) {
@ -22,12 +22,13 @@ import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.BrokerServiceAware;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.broker.scheduler.JobSchedulerStore;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTempQueue;

@ -51,31 +52,35 @@ import org.apache.activemq.store.TransactionRecoveryListener;
import org.apache.activemq.store.TransactionStore;
import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
import org.apache.activemq.store.kahadb.data.KahaDestination;
import org.apache.activemq.store.kahadb.data.KahaDestination.DestinationType;
import org.apache.activemq.store.kahadb.data.KahaLocation;
import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
import org.apache.activemq.store.kahadb.data.KahaDestination.DestinationType;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Transaction;
import org.apache.activemq.usage.MemoryUsage;
import org.apache.activemq.usage.SystemUsage;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Transaction;

public class TempKahaDBStore extends TempMessageDatabase implements PersistenceAdapter, BrokerServiceAware {

    private final WireFormat wireFormat = new OpenWireFormat();
    private BrokerService brokerService;

    @Override
    public void setBrokerName(String brokerName) {
    }
    @Override
    public void setUsageManager(SystemUsage usageManager) {
    }

    @Override
    public TransactionStore createTransactionStore() throws IOException {
        return new TransactionStore(){


            @Override
            public void commit(TransactionId txid, boolean wasPrepared, Runnable preCommit,Runnable postCommit) throws IOException {
                if (preCommit != null) {
                    preCommit.run();

@ -85,18 +90,21 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
                    postCommit.run();
                }
            }
            @Override
            public void prepare(TransactionId txid) throws IOException {
            processPrepare(txid);
                processPrepare(txid);
            }
            @Override
            public void rollback(TransactionId txid) throws IOException {
            processRollback(txid);
                processRollback(txid);
            }
            @Override
            public void recover(TransactionRecoveryListener listener) throws IOException {
                for (Map.Entry<TransactionId, ArrayList<Operation>> entry : preparedTransactions.entrySet()) {
                    XATransactionId xid = (XATransactionId)entry.getKey();
                    ArrayList<Message> messageList = new ArrayList<Message>();
                    ArrayList<MessageAck> ackList = new ArrayList<MessageAck>();


                    for (Operation op : entry.getValue()) {
                        if( op.getClass() == AddOpperation.class ) {
                            AddOpperation addOp = (AddOpperation)op;

@ -108,7 +116,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
                            ackList.add(ack);
                        }
                    }


                    Message[] addedMessages = new Message[messageList.size()];
                    MessageAck[] acks = new MessageAck[ackList.size()];
                    messageList.toArray(addedMessages);

@ -116,8 +124,10 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
                    listener.recover(xid, addedMessages, acks);
                }
            }
            @Override
            public void start() throws Exception {
            }
            @Override
            public void stop() throws Exception {
            }
        };

@ -136,13 +146,15 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            return destination;
        }

        @Override
        public void addMessage(ConnectionContext context, Message message) throws IOException {
            KahaAddMessageCommand command = new KahaAddMessageCommand();
            command.setDestination(dest);
            command.setMessageId(message.getMessageId().toProducerKey());
            processAdd(command, message.getTransactionId(), wireFormat.marshal(message));
        }

        @Override
        public void removeMessage(ConnectionContext context, MessageAck ack) throws IOException {
            KahaRemoveMessageCommand command = new KahaRemoveMessageCommand();
            command.setDestination(dest);

@ -150,20 +162,23 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            processRemove(command, ack.getTransactionId());
        }

        @Override
        public void removeAllMessages(ConnectionContext context) throws IOException {
            KahaRemoveDestinationCommand command = new KahaRemoveDestinationCommand();
            command.setDestination(dest);
            process(command);
        }

        @Override
        public Message getMessage(MessageId identity) throws IOException {
            final String key = identity.toProducerKey();


            // Hopefully one day the page file supports concurrent read operations... but for now we must
            // externally synchronize...
            ByteSequence data;
            synchronized(indexMutex) {
                data = pageFile.tx().execute(new Transaction.CallableClosure<ByteSequence, IOException>(){
                    @Override
                    public ByteSequence execute(Transaction tx) throws IOException {
                        StoredDestination sd = getStoredDestination(dest, tx);
                        Long sequence = sd.messageIdIndex.get(tx, key);

@ -177,14 +192,16 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            if( data == null ) {
                return null;
            }


            Message msg = (Message)wireFormat.unmarshal( data );
                return msg;
            return msg;
        }

        @Override
        public int getMessageCount() throws IOException {
            synchronized(indexMutex) {
                return pageFile.tx().execute(new Transaction.CallableClosure<Integer, IOException>(){
                    @Override
                    public Integer execute(Transaction tx) throws IOException {
                        // Iterate through all index entries to get a count of messages in the destination.
                        StoredDestination sd = getStoredDestination(dest, tx);

@ -199,9 +216,11 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            }
        }

        @Override
        public void recover(final MessageRecoveryListener listener) throws Exception {
            synchronized(indexMutex) {
                pageFile.tx().execute(new Transaction.Closure<Exception>(){
                    @Override
                    public void execute(Transaction tx) throws Exception {
                        StoredDestination sd = getStoredDestination(dest, tx);
                        for (Iterator<Entry<Long, MessageRecord>> iterator = sd.orderIndex.iterator(tx); iterator.hasNext();) {

@ -214,10 +233,12 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
        }

        long cursorPos=0;


        @Override
        public void recoverNextMessages(final int maxReturned, final MessageRecoveryListener listener) throws Exception {
            synchronized(indexMutex) {
                pageFile.tx().execute(new Transaction.Closure<Exception>(){
                    @Override
                    public void execute(Transaction tx) throws Exception {
                        StoredDestination sd = getStoredDestination(dest, tx);
                        Entry<Long, MessageRecord> entry=null;

@ -238,20 +259,22 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            }
        }

        @Override
        public void resetBatching() {
            cursorPos=0;
        }



        @Override
        public void setBatch(MessageId identity) throws IOException {
            final String key = identity.toProducerKey();


            // Hopefully one day the page file supports concurrent read operations... but for now we must
            // externally synchronize...
            Long location;
            synchronized(indexMutex) {
                location = pageFile.tx().execute(new Transaction.CallableClosure<Long, IOException>(){
                    @Override
                    public Long execute(Transaction tx) throws IOException {
                        StoredDestination sd = getStoredDestination(dest, tx);
                        return sd.messageIdIndex.get(tx, key);

@ -261,7 +284,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            if( location!=null ) {
                cursorPos=location+1;
            }

        }

        @Override

@ -273,14 +296,15 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
        @Override
        public void stop() throws Exception {
        }


    }


    class KahaDBTopicMessageStore extends KahaDBMessageStore implements TopicMessageStore {
        public KahaDBTopicMessageStore(ActiveMQTopic destination) {
            super(destination);
        }


        @Override
        public void acknowledge(ConnectionContext context, String clientId, String subscriptionName,
                MessageId messageId, MessageAck ack) throws IOException {
            KahaRemoveMessageCommand command = new KahaRemoveMessageCommand();

@ -294,6 +318,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            processRemove(command, null);
        }

        @Override
        public void addSubscription(SubscriptionInfo subscriptionInfo, boolean retroactive) throws IOException {
            String subscriptionKey = subscriptionKey(subscriptionInfo.getClientId(), subscriptionInfo.getSubscriptionName());
            KahaSubscriptionCommand command = new KahaSubscriptionCommand();

@ -305,6 +330,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            process(command);
        }

        @Override
        public void deleteSubscription(String clientId, String subscriptionName) throws IOException {
            KahaSubscriptionCommand command = new KahaSubscriptionCommand();
            command.setDestination(dest);

@ -312,11 +338,13 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
            process(command);
        }

        @Override
        public SubscriptionInfo[] getAllSubscriptions() throws IOException {


            final ArrayList<SubscriptionInfo> subscriptions = new ArrayList<SubscriptionInfo>();
            synchronized(indexMutex) {
                pageFile.tx().execute(new Transaction.Closure<IOException>(){
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
for (Iterator<Entry<String, KahaSubscriptionCommand>> iterator = sd.subscriptions.iterator(tx); iterator.hasNext();) {
|
||||
|
@ -328,16 +356,18 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
SubscriptionInfo[] rc = new SubscriptionInfo[subscriptions.size()];
|
||||
subscriptions.toArray(rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SubscriptionInfo lookupSubscription(String clientId, String subscriptionName) throws IOException {
|
||||
final String subscriptionKey = subscriptionKey(clientId, subscriptionName);
|
||||
synchronized(indexMutex) {
|
||||
return pageFile.tx().execute(new Transaction.CallableClosure<SubscriptionInfo, IOException>(){
|
||||
@Override
|
||||
public SubscriptionInfo execute(Transaction tx) throws IOException {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
KahaSubscriptionCommand command = sd.subscriptions.get(tx, subscriptionKey);
|
||||
|
@ -349,11 +379,13 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int getMessageCount(String clientId, String subscriptionName) throws IOException {
|
||||
final String subscriptionKey = subscriptionKey(clientId, subscriptionName);
|
||||
synchronized(indexMutex) {
|
||||
return pageFile.tx().execute(new Transaction.CallableClosure<Integer, IOException>(){
|
||||
@Override
|
||||
public Integer execute(Transaction tx) throws IOException {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
Long cursorPos = sd.subscriptionAcks.get(tx, subscriptionKey);
|
||||
|
@ -362,7 +394,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
return 0;
|
||||
}
|
||||
cursorPos += 1;
|
||||
|
||||
|
||||
int counter = 0;
|
||||
for (Iterator<Entry<Long, MessageRecord>> iterator = sd.orderIndex.iterator(tx, cursorPos); iterator.hasNext();) {
|
||||
iterator.next();
|
||||
|
@ -371,18 +403,20 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
return counter;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void recoverSubscription(String clientId, String subscriptionName, final MessageRecoveryListener listener) throws Exception {
|
||||
final String subscriptionKey = subscriptionKey(clientId, subscriptionName);
|
||||
synchronized(indexMutex) {
|
||||
pageFile.tx().execute(new Transaction.Closure<Exception>(){
|
||||
@Override
|
||||
public void execute(Transaction tx) throws Exception {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
Long cursorPos = sd.subscriptionAcks.get(tx, subscriptionKey);
|
||||
cursorPos += 1;
|
||||
|
||||
|
||||
for (Iterator<Entry<Long, MessageRecord>> iterator = sd.orderIndex.iterator(tx, cursorPos); iterator.hasNext();) {
|
||||
Entry<Long, MessageRecord> entry = iterator.next();
|
||||
listener.recoverMessage( (Message) wireFormat.unmarshal(entry.getValue().data ) );
|
||||
|
@ -392,10 +426,12 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void recoverNextMessages(String clientId, String subscriptionName, final int maxReturned, final MessageRecoveryListener listener) throws Exception {
|
||||
final String subscriptionKey = subscriptionKey(clientId, subscriptionName);
|
||||
synchronized(indexMutex) {
|
||||
pageFile.tx().execute(new Transaction.Closure<Exception>(){
|
||||
@Override
|
||||
public void execute(Transaction tx) throws Exception {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
Long cursorPos = sd.subscriptionCursors.get(subscriptionKey);
|
||||
|
@ -403,7 +439,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
cursorPos = sd.subscriptionAcks.get(tx, subscriptionKey);
|
||||
cursorPos += 1;
|
||||
}
|
||||
|
||||
|
||||
Entry<Long, MessageRecord> entry=null;
|
||||
int counter = 0;
|
||||
for (Iterator<Entry<Long, MessageRecord>> iterator = sd.orderIndex.iterator(tx, cursorPos); iterator.hasNext();) {
|
||||
|
@ -422,11 +458,13 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resetBatching(String clientId, String subscriptionName) {
|
||||
try {
|
||||
final String subscriptionKey = subscriptionKey(clientId, subscriptionName);
|
||||
synchronized(indexMutex) {
|
||||
pageFile.tx().execute(new Transaction.Closure<IOException>(){
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
StoredDestination sd = getStoredDestination(dest, tx);
|
||||
sd.subscriptionCursors.remove(subscriptionKey);
|
||||
|
@ -442,11 +480,13 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
String subscriptionKey(String clientId, String subscriptionName){
|
||||
return clientId+":"+subscriptionName;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public MessageStore createQueueMessageStore(ActiveMQQueue destination) throws IOException {
|
||||
return new KahaDBMessageStore(destination);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicMessageStore createTopicMessageStore(ActiveMQTopic destination) throws IOException {
|
||||
return new KahaDBTopicMessageStore(destination);
|
||||
}
|
||||
|
@ -457,6 +497,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
*
|
||||
* @param destination Destination to forget
|
||||
*/
|
||||
@Override
|
||||
public void removeQueueMessageStore(ActiveMQQueue destination) {
|
||||
}
|
||||
|
||||
|
@ -466,18 +507,22 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
*
|
||||
* @param destination Destination to forget
|
||||
*/
|
||||
@Override
|
||||
public void removeTopicMessageStore(ActiveMQTopic destination) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteAllMessages() throws IOException {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public Set<ActiveMQDestination> getDestinations() {
|
||||
try {
|
||||
final HashSet<ActiveMQDestination> rc = new HashSet<ActiveMQDestination>();
|
||||
synchronized(indexMutex) {
|
||||
pageFile.tx().execute(new Transaction.Closure<IOException>(){
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
for (Iterator<Entry<String, StoredDestination>> iterator = destinations.iterator(tx); iterator.hasNext();) {
|
||||
Entry<String, StoredDestination> entry = iterator.next();
|
||||
|
@ -491,11 +536,13 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public long getLastMessageBrokerSequenceId() throws IOException {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public long size() {
|
||||
if ( !started.get() ) {
|
||||
return 0;
|
||||
|
@ -507,32 +554,36 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beginTransaction(ConnectionContext context) throws IOException {
|
||||
throw new IOException("Not yet implemented.");
|
||||
}
|
||||
@Override
|
||||
public void commitTransaction(ConnectionContext context) throws IOException {
|
||||
throw new IOException("Not yet implemented.");
|
||||
}
|
||||
@Override
|
||||
public void rollbackTransaction(ConnectionContext context) throws IOException {
|
||||
throw new IOException("Not yet implemented.");
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void checkpoint(boolean sync) throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
// Internal conversion methods.
|
||||
///////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
KahaLocation convert(Location location) {
|
||||
KahaLocation rc = new KahaLocation();
|
||||
rc.setLogId(location.getDataFileId());
|
||||
rc.setOffset(location.getOffset());
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
KahaDestination convert(ActiveMQDestination dest) {
|
||||
KahaDestination rc = new KahaDestination();
|
||||
rc.setName(dest.getPhysicalName());
|
||||
|
@ -561,7 +612,7 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
int type = Integer.parseInt(dest.substring(0, p));
|
||||
String name = dest.substring(p+1);
|
||||
|
||||
|
||||
switch( KahaDestination.DestinationType.valueOf(type) ) {
|
||||
case QUEUE:
|
||||
return new ActiveMQQueue(name);
|
||||
|
@ -571,11 +622,12 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
return new ActiveMQTempQueue(name);
|
||||
case TEMP_TOPIC:
|
||||
return new ActiveMQTempTopic(name);
|
||||
default:
|
||||
default:
|
||||
throw new IllegalArgumentException("Not in the valid destination format");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public long getLastProducerSequenceId(ProducerId id) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -592,4 +644,8 @@ public class TempKahaDBStore extends TempMessageDatabase implements PersistenceA
|
|||
}
|
||||
super.load();
|
||||
}
|
||||
@Override
|
||||
public JobSchedulerStore createJobSchedulerStore() throws IOException, UnsupportedOperationException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,11 +20,16 @@ import java.io.IOException;
|
|||
|
||||
import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaAddScheduledJobCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaDestroySchedulerCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRemoveScheduledJobCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRemoveScheduledJobsCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRescheduleJobCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaRollbackCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
|
||||
import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
|
||||
|
@ -62,6 +67,21 @@ public class Visitor {
|
|||
public void visit(KahaAckMessageFileMapCommand kahaProducerAuditCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaAddScheduledJobCommand kahaAddScheduledJobCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaRescheduleJobCommand kahaRescheduleJobCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaRemoveScheduledJobCommand kahaRemoveScheduledJobCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaRemoveScheduledJobsCommand kahaRemoveScheduledJobsCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaDestroySchedulerCommand kahaDestroySchedulerCommand) throws IOException {
|
||||
}
|
||||
|
||||
public void visit(KahaUpdateMessageCommand kahaUpdateMessageCommand) throws IOException {
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,8 +25,8 @@ public class JobImpl implements Job {
|
|||
private final JobLocation jobLocation;
|
||||
private final byte[] payload;
|
||||
|
||||
protected JobImpl(JobLocation location,ByteSequence bs) {
|
||||
this.jobLocation=location;
|
||||
protected JobImpl(JobLocation location, ByteSequence bs) {
|
||||
this.jobLocation = location;
|
||||
this.payload = new byte[bs.getLength()];
|
||||
System.arraycopy(bs.getData(), bs.getOffset(), this.payload, 0, bs.getLength());
|
||||
}
|
||||
|
@ -38,22 +38,22 @@ public class JobImpl implements Job {
|
|||
|
||||
@Override
|
||||
public byte[] getPayload() {
|
||||
return this.payload;
|
||||
return this.payload;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPeriod() {
|
||||
return this.jobLocation.getPeriod();
|
||||
return this.jobLocation.getPeriod();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getRepeat() {
|
||||
return this.jobLocation.getRepeat();
|
||||
return this.jobLocation.getRepeat();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getStart() {
|
||||
return this.jobLocation.getStartTime();
|
||||
return this.jobLocation.getStartTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -76,4 +76,13 @@ public class JobImpl implements Job {
|
|||
return JobSupport.getDateTime(getStart());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getExecutionCount() {
|
||||
return this.jobLocation.getRescheduledCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Job: " + getJobId();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,6 +36,8 @@ class JobLocation {
|
|||
private long period;
|
||||
private String cronEntry;
|
||||
private final Location location;
|
||||
private int rescheduledCount;
|
||||
private Location lastUpdate;
|
||||
|
||||
public JobLocation(Location location) {
|
||||
this.location = location;
|
||||
|
@ -52,8 +54,12 @@ class JobLocation {
|
|||
this.delay = in.readLong();
|
||||
this.nextTime = in.readLong();
|
||||
this.period = in.readLong();
|
||||
this.cronEntry=in.readUTF();
|
||||
this.cronEntry = in.readUTF();
|
||||
this.location.readExternal(in);
|
||||
if (in.readBoolean()) {
|
||||
this.lastUpdate = new Location();
|
||||
this.lastUpdate.readExternal(in);
|
||||
}
|
||||
}
|
||||
|
||||
public void writeExternal(DataOutput out) throws IOException {
|
||||
|
@ -63,11 +69,17 @@ class JobLocation {
|
|||
out.writeLong(this.delay);
|
||||
out.writeLong(this.nextTime);
|
||||
out.writeLong(this.period);
|
||||
if (this.cronEntry==null) {
|
||||
this.cronEntry="";
|
||||
if (this.cronEntry == null) {
|
||||
this.cronEntry = "";
|
||||
}
|
||||
out.writeUTF(this.cronEntry);
|
||||
this.location.writeExternal(out);
|
||||
if (lastUpdate != null) {
|
||||
out.writeBoolean(true);
|
||||
this.lastUpdate.writeExternal(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -123,7 +135,8 @@ class JobLocation {
|
|||
}
|
||||
|
||||
/**
|
||||
* @param nextTime the nextTime to set
|
||||
* @param nextTime
|
||||
* the nextTime to set
|
||||
*/
|
||||
public synchronized void setNextTime(long nextTime) {
|
||||
this.nextTime = nextTime;
|
||||
|
@ -152,7 +165,8 @@ class JobLocation {
|
|||
}
|
||||
|
||||
/**
|
||||
* @param cronEntry the cronEntry to set
|
||||
* @param cronEntry
|
||||
* the cronEntry to set
|
||||
*/
|
||||
public synchronized void setCronEntry(String cronEntry) {
|
||||
this.cronEntry = cronEntry;
|
||||
|
@ -173,7 +187,8 @@ class JobLocation {
|
|||
}
|
||||
|
||||
/**
|
||||
* @param delay the delay to set
|
||||
* @param delay
|
||||
* the delay to set
|
||||
*/
|
||||
public void setDelay(long delay) {
|
||||
this.delay = delay;
|
||||
|
@ -186,15 +201,55 @@ class JobLocation {
|
|||
return this.location;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the location in the journal of the last update issued for this
|
||||
* Job.
|
||||
*/
|
||||
public Location getLastUpdate() {
|
||||
return this.lastUpdate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the location of the last update command written to the journal for
|
||||
* this Job. The update commands set the next execution time for this job.
|
||||
* We need to keep track of only the latest update as it's the only one we
|
||||
* really need to recover the correct state from the journal.
|
||||
*
|
||||
* @param location
|
||||
* The location in the journal of the last update command.
|
||||
*/
|
||||
public void setLastUpdate(Location location) {
|
||||
this.lastUpdate = location;
|
||||
}
|
||||
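Only the latest update needs to survive a replay, so this bookkeeping reduces to a map overwrite. A minimal sketch of that idea, assuming a stand-in JournalRef class in place of the store's Location type (both names below are illustrative, not ActiveMQ API):
import java.util.HashMap;
import java.util.Map;
final class LastUpdateReplaySketch {
    // Stand-in for org.apache.activemq.store.kahadb.disk.journal.Location.
    static final class JournalRef {
        final int fileId;
        final int offset;
        JournalRef(int fileId, int offset) {
            this.fileId = fileId;
            this.offset = offset;
        }
    }
    // Replay update commands in journal order; a later update for the same job
    // overwrites the earlier one, so only the last location survives, which is
    // exactly the state JobLocation.setLastUpdate() tracks.
    static Map<String, JournalRef> replay(Iterable<Map.Entry<String, JournalRef>> updates) {
        Map<String, JournalRef> lastUpdate = new HashMap<String, JournalRef>();
        for (Map.Entry<String, JournalRef> update : updates) {
            lastUpdate.put(update.getKey(), update.getValue());
        }
        return lastUpdate;
    }
}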
|
||||
/**
|
||||
* @return the number of times this job has been rescheduled.
|
||||
*/
|
||||
public int getRescheduledCount() {
|
||||
return rescheduledCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the number of times this job has been rescheduled. A newly added job reports
|
||||
* zero; the value is incremented each time a scheduled message is dispatched to its
|
||||
* target destination and the job is rescheduled for another cycle.
|
||||
*
|
||||
* @param rescheduledCount
|
||||
* the new reschedule count to assign to this JobLocation.
|
||||
*/
|
||||
public void setRescheduledCount(int rescheduledCount) {
|
||||
this.rescheduledCount = rescheduledCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Job [id=" + jobId + ", startTime=" + new Date(startTime)
|
||||
+ ", delay=" + delay + ", period=" + period + ", repeat="
|
||||
+ repeat + ", nextTime=" + new Date(nextTime) + "]";
|
||||
return "Job [id=" + jobId + ", startTime=" + new Date(startTime) + ", delay=" + delay + ", period=" + period + ", repeat=" + repeat + ", nextTime="
|
||||
+ new Date(nextTime) + ", executionCount = " + (rescheduledCount + 1) + "]";
|
||||
}
|
||||
|
||||
static class JobLocationMarshaller extends VariableMarshaller<List<JobLocation>> {
|
||||
static final JobLocationMarshaller INSTANCE = new JobLocationMarshaller();
|
||||
|
||||
@Override
|
||||
public List<JobLocation> readPayload(DataInput dataIn) throws IOException {
|
||||
List<JobLocation> result = new ArrayList<JobLocation>();
|
||||
|
@ -228,6 +283,7 @@ class JobLocation {
|
|||
result = prime * result + (int) (period ^ (period >>> 32));
|
||||
result = prime * result + repeat;
|
||||
result = prime * result + (int) (startTime ^ (startTime >>> 32));
|
||||
result = prime * result + rescheduledCount;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -286,6 +342,9 @@ class JobLocation {
|
|||
if (startTime != other.startTime) {
|
||||
return false;
|
||||
}
|
||||
if (rescheduledCount != other.rescheduledCount) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
|
||||
|
||||
/**
|
||||
* A VariableMarshaller instance that performs the read and write of a list of
|
||||
* JobLocation objects using the JobLocation's built in read and write methods.
|
||||
*/
|
||||
class JobLocationsMarshaller extends VariableMarshaller<List<JobLocation>> {
|
||||
static final JobLocationsMarshaller INSTANCE = new JobLocationsMarshaller();
|
||||
|
||||
@Override
|
||||
public List<JobLocation> readPayload(DataInput dataIn) throws IOException {
|
||||
List<JobLocation> result = new ArrayList<JobLocation>();
|
||||
int size = dataIn.readInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
JobLocation jobLocation = new JobLocation();
|
||||
jobLocation.readExternal(dataIn);
|
||||
result.add(jobLocation);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(List<JobLocation> value, DataOutput dataOut) throws IOException {
|
||||
dataOut.writeInt(value.size());
|
||||
for (JobLocation jobLocation : value) {
|
||||
jobLocation.writeExternal(dataOut);
|
||||
}
|
||||
}
|
||||
}
|
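The marshaller above uses simple length-prefixed framing: write the element count, then each element, and mirror that order on read. A self-contained sketch of the same framing with JDK streams, with String elements standing in for JobLocation:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
final class LengthPrefixedListSketch {
    static byte[] write(List<String> values) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(values.size());    // element count first
        for (String value : values) {
            out.writeUTF(value);        // then each element in order
        }
        return bytes.toByteArray();
    }
    static List<String> read(byte[] data) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        int size = in.readInt();
        List<String> result = new ArrayList<String>(size);
        for (int i = 0; i < size; i++) {
            result.add(in.readUTF());
        }
        return result;
    }
}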
File diff suppressed because it is too large
|
@ -0,0 +1,246 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.activemq.store.kahadb.scheduler;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.activemq.store.kahadb.AbstractKahaDBMetaData;
|
||||
import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
|
||||
import org.apache.activemq.store.kahadb.disk.page.Transaction;
|
||||
import org.apache.activemq.store.kahadb.disk.util.IntegerMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* The KahaDB MetaData used to house the Index data for the KahaDB implementation
|
||||
* of a JobSchedulerStore.
|
||||
*/
|
||||
public class JobSchedulerKahaDBMetaData extends AbstractKahaDBMetaData<JobSchedulerKahaDBMetaData> {
|
||||
|
||||
static final Logger LOG = LoggerFactory.getLogger(JobSchedulerKahaDBMetaData.class);
|
||||
|
||||
private final JobSchedulerStoreImpl store;
|
||||
|
||||
private UUID token = JobSchedulerStoreImpl.SCHEDULER_STORE_TOKEN;
|
||||
private int version = JobSchedulerStoreImpl.CURRENT_VERSION;
|
||||
|
||||
private BTreeIndex<Integer, List<Integer>> removeLocationTracker;
|
||||
private BTreeIndex<Integer, Integer> journalRC;
|
||||
private BTreeIndex<String, JobSchedulerImpl> storedSchedulers;
|
||||
|
||||
/**
|
||||
* Creates a new instance of this meta data object with the assigned
|
||||
* parent JobSchedulerStore instance.
|
||||
*
|
||||
* @param store
|
||||
* the store instance that owns this meta data.
|
||||
*/
|
||||
public JobSchedulerKahaDBMetaData(JobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the current value of the Scheduler store identification token.
|
||||
*/
|
||||
public UUID getToken() {
|
||||
return this.token;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the current value of the version tag for this meta data instance.
|
||||
*/
|
||||
public int getVersion() {
|
||||
return this.version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the index that contains the location tracking information for Jobs
|
||||
* that have been removed from the index but whose add operation has yet
|
||||
* to be removed from the Journal.
|
||||
*
|
||||
* The Journal log file where a remove command is written cannot be released
|
||||
* until the log file with the original add command has also been released,
|
||||
* otherwise on a log replay the scheduled job could reappear in the scheduler
|
||||
* since its corresponding remove might no longer be present.
|
||||
*
|
||||
* @return the remove command location tracker index.
|
||||
*/
|
||||
public BTreeIndex<Integer, List<Integer>> getRemoveLocationTracker() {
|
||||
return this.removeLocationTracker;
|
||||
}
|
||||
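One plausible shape of the release rule described above, sketched with JDK collections only; RemoveTrackerSketch, canRelease, and liveFiles are illustrative names, not ActiveMQ API:
import java.util.List;
import java.util.Map;
import java.util.Set;
final class RemoveTrackerSketch {
    /**
     * @param candidate the journal data file id being considered for removal,
     *                  assumed to hold remove commands
     * @param addFilesReferencedBy map from a remove-bearing file id to the ids
     *                  of the files holding the original add commands
     * @param liveFiles data file ids still present in the journal
     */
    static boolean canRelease(int candidate, Map<Integer, List<Integer>> addFilesReferencedBy, Set<Integer> liveFiles) {
        List<Integer> addFiles = addFilesReferencedBy.get(candidate);
        if (addFiles == null) {
            return true; // no removes in this file pin any other file
        }
        for (int addFile : addFiles) {
            if (addFile != candidate && liveFiles.contains(addFile)) {
                // The original add is still on disk, so the remove must stay
                // too, or a replay would resurrect the job.
                return false;
            }
        }
        return true;
    }
}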
|
||||
/**
|
||||
* Gets the index used to track the number of references to a Journal log file.
|
||||
*
|
||||
* A log file in the Journal can only be considered for removal after all the
|
||||
* references to it have been released.
|
||||
*
|
||||
* @return the journal log file reference counter index.
|
||||
*/
|
||||
public BTreeIndex<Integer, Integer> getJournalRC() {
|
||||
return this.journalRC;
|
||||
}
|
||||
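A minimal sketch of the per-log-file reference counting this index holds, using a plain JDK map in place of the BTreeIndex (names illustrative):
import java.util.HashMap;
import java.util.Map;
final class JournalRefCountSketch {
    private final Map<Integer, Integer> refCounts = new HashMap<Integer, Integer>();
    // Each command written to a data file adds one reference to that file.
    void onCommandWritten(int dataFileId) {
        Integer count = refCounts.get(dataFileId);
        refCounts.put(dataFileId, count == null ? 1 : count + 1);
    }
    // Releasing the last reference removes the counter entirely.
    void onCommandReleased(int dataFileId) {
        Integer count = refCounts.get(dataFileId);
        if (count != null) {
            if (count > 1) {
                refCounts.put(dataFileId, count - 1);
            } else {
                refCounts.remove(dataFileId);
            }
        }
    }
    // A data file with no outstanding references is eligible for removal.
    boolean isRemovable(int dataFileId) {
        return !refCounts.containsKey(dataFileId);
    }
}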
|
||||
/**
|
||||
* Gets the index of JobScheduler instances that have been created and stored
|
||||
* in the JobSchedulerStore instance.
|
||||
*
|
||||
* @return the index of stored JobScheduler instances.
|
||||
*/
|
||||
public BTreeIndex<String, JobSchedulerImpl> getJobSchedulers() {
|
||||
return this.storedSchedulers;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initialize(Transaction tx) throws IOException {
|
||||
this.storedSchedulers = new BTreeIndex<String, JobSchedulerImpl>(store.getPageFile(), tx.allocate().getPageId());
|
||||
this.journalRC = new BTreeIndex<Integer, Integer>(store.getPageFile(), tx.allocate().getPageId());
|
||||
this.removeLocationTracker = new BTreeIndex<Integer, List<Integer>>(store.getPageFile(), tx.allocate().getPageId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void load(Transaction tx) throws IOException {
|
||||
this.storedSchedulers.setKeyMarshaller(StringMarshaller.INSTANCE);
|
||||
this.storedSchedulers.setValueMarshaller(new JobSchedulerMarshaller(this.store));
|
||||
this.storedSchedulers.load(tx);
|
||||
this.journalRC.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.setValueMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.load(tx);
|
||||
this.removeLocationTracker.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.removeLocationTracker.setValueMarshaller(new IntegerListMarshaller());
|
||||
this.removeLocationTracker.load(tx);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads all the stored JobScheduler instances into the provided map.
|
||||
*
|
||||
* @param tx
|
||||
* the Transaction under which the load operation should be executed.
|
||||
* @param schedulers
|
||||
* a Map<String, JobSchedulerImpl> into which the loaded schedulers are stored.
|
||||
*
|
||||
* @throws IOException if an error occurs while performing the load operation.
|
||||
*/
|
||||
public void loadScheduler(Transaction tx, Map<String, JobSchedulerImpl> schedulers) throws IOException {
|
||||
for (Iterator<Entry<String, JobSchedulerImpl>> i = this.storedSchedulers.iterator(tx); i.hasNext();) {
|
||||
Entry<String, JobSchedulerImpl> entry = i.next();
|
||||
entry.getValue().load(tx);
|
||||
schedulers.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(DataInput in) throws IOException {
|
||||
try {
|
||||
long msb = in.readLong();
|
||||
long lsb = in.readLong();
|
||||
this.token = new UUID(msb, lsb);
|
||||
} catch (Exception e) {
|
||||
throw new UnknownStoreVersionException(e);
|
||||
}
|
||||
|
||||
if (!token.equals(JobSchedulerStoreImpl.SCHEDULER_STORE_TOKEN)) {
|
||||
throw new UnknownStoreVersionException(token.toString());
|
||||
}
|
||||
this.version = in.readInt();
|
||||
if (in.readBoolean()) {
|
||||
setLastUpdateLocation(LocationMarshaller.INSTANCE.readPayload(in));
|
||||
} else {
|
||||
setLastUpdateLocation(null);
|
||||
}
|
||||
this.storedSchedulers = new BTreeIndex<String, JobSchedulerImpl>(store.getPageFile(), in.readLong());
|
||||
this.storedSchedulers.setKeyMarshaller(StringMarshaller.INSTANCE);
|
||||
this.storedSchedulers.setValueMarshaller(new JobSchedulerMarshaller(this.store));
|
||||
this.journalRC = new BTreeIndex<Integer, Integer>(store.getPageFile(), in.readLong());
|
||||
this.journalRC.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.setValueMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.removeLocationTracker = new BTreeIndex<Integer, List<Integer>>(store.getPageFile(), in.readLong());
|
||||
this.removeLocationTracker.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.removeLocationTracker.setValueMarshaller(new IntegerListMarshaller());
|
||||
|
||||
LOG.info("Scheduler Store version {} loaded", this.version);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeLong(this.token.getMostSignificantBits());
|
||||
out.writeLong(this.token.getLeastSignificantBits());
|
||||
out.writeInt(this.version);
|
||||
if (getLastUpdateLocation() != null) {
|
||||
out.writeBoolean(true);
|
||||
LocationMarshaller.INSTANCE.writePayload(getLastUpdateLocation(), out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeLong(this.storedSchedulers.getPageId());
|
||||
out.writeLong(this.journalRC.getPageId());
|
||||
out.writeLong(this.removeLocationTracker.getPageId());
|
||||
}
|
||||
|
||||
private class JobSchedulerMarshaller extends VariableMarshaller<JobSchedulerImpl> {
|
||||
private final JobSchedulerStoreImpl store;
|
||||
|
||||
JobSchedulerMarshaller(JobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JobSchedulerImpl readPayload(DataInput dataIn) throws IOException {
|
||||
JobSchedulerImpl result = new JobSchedulerImpl(this.store);
|
||||
result.read(dataIn);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(JobSchedulerImpl js, DataOutput dataOut) throws IOException {
|
||||
js.write(dataOut);
|
||||
}
|
||||
}
|
||||
|
||||
private class IntegerListMarshaller extends VariableMarshaller<List<Integer>> {
|
||||
|
||||
@Override
|
||||
public List<Integer> readPayload(DataInput dataIn) throws IOException {
|
||||
List<Integer> result = new ArrayList<Integer>();
|
||||
int size = dataIn.readInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
result.add(IntegerMarshaller.INSTANCE.readPayload(dataIn));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(List<Integer> value, DataOutput dataOut) throws IOException {
|
||||
dataOut.writeInt(value.size());
|
||||
for (Integer integer : value) {
|
||||
IntegerMarshaller.INSTANCE.writePayload(integer, dataOut);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,24 @@
|
|||
package org.apache.activemq.store.kahadb.scheduler;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class UnknownStoreVersionException extends IOException {
|
||||
|
||||
private static final long serialVersionUID = -8544753506151157145L;
|
||||
|
||||
private final String token;
|
||||
|
||||
public UnknownStoreVersionException(Throwable cause) {
|
||||
super(cause);
|
||||
this.token = "";
|
||||
}
|
||||
|
||||
public UnknownStoreVersionException(String token) {
|
||||
super("Failed to load Store, found unknown store token: " + token);
|
||||
this.token = token;
|
||||
}
|
||||
|
||||
public String getToken() {
|
||||
return this.token;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler.legacy;
|
||||
|
||||
import org.apache.activemq.protobuf.Buffer;
|
||||
import org.apache.activemq.util.ByteSequence;
|
||||
|
||||
/**
|
||||
* Legacy version Job and Job payload wrapper. Allows for easy replay of stored
|
||||
* legacy jobs into a new JobSchedulerStoreImpl instance.
|
||||
*/
|
||||
final class LegacyJobImpl {
|
||||
|
||||
private final LegacyJobLocation jobLocation;
|
||||
private final Buffer payload;
|
||||
|
||||
protected LegacyJobImpl(LegacyJobLocation location, ByteSequence payload) {
|
||||
this.jobLocation = location;
|
||||
this.payload = new Buffer(payload.data, payload.offset, payload.length);
|
||||
}
|
||||
|
||||
public String getJobId() {
|
||||
return this.jobLocation.getJobId();
|
||||
}
|
||||
|
||||
public Buffer getPayload() {
|
||||
return this.payload;
|
||||
}
|
||||
|
||||
public long getPeriod() {
|
||||
return this.jobLocation.getPeriod();
|
||||
}
|
||||
|
||||
public int getRepeat() {
|
||||
return this.jobLocation.getRepeat();
|
||||
}
|
||||
|
||||
public long getDelay() {
|
||||
return this.jobLocation.getDelay();
|
||||
}
|
||||
|
||||
public String getCronEntry() {
|
||||
return this.jobLocation.getCronEntry();
|
||||
}
|
||||
|
||||
public long getNextExecutionTime() {
|
||||
return this.jobLocation.getNextTime();
|
||||
}
|
||||
|
||||
public long getStartTime() {
|
||||
return this.jobLocation.getStartTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.jobLocation.toString();
|
||||
}
|
||||
}
|
|
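Given those accessors, replaying a legacy store reduces to iterating its jobs and re-submitting each one. A sketch of that loop; NewSchedulerTarget is a hypothetical interface for illustration, since the new store's scheduling API is not part of this file:
import java.util.List;
import org.apache.activemq.protobuf.Buffer;
final class LegacyReplaySketch {
    // Hypothetical sink for replayed jobs, standing in for the new store.
    interface NewSchedulerTarget {
        void schedule(String jobId, Buffer payload, String cronEntry,
                      long delay, long period, int repeat) throws Exception;
    }
    // Hand each legacy job, with its payload and timing settings, to the new store.
    static void replay(List<LegacyJobImpl> legacyJobs, NewSchedulerTarget target) throws Exception {
        for (LegacyJobImpl job : legacyJobs) {
            target.schedule(job.getJobId(), job.getPayload(), job.getCronEntry(),
                            job.getDelay(), job.getPeriod(), job.getRepeat());
        }
    }
}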
@ -0,0 +1,296 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler.legacy;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.activemq.store.kahadb.disk.journal.Location;
|
||||
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
|
||||
|
||||
final class LegacyJobLocation {
|
||||
|
||||
private String jobId;
|
||||
private int repeat;
|
||||
private long startTime;
|
||||
private long delay;
|
||||
private long nextTime;
|
||||
private long period;
|
||||
private String cronEntry;
|
||||
private final Location location;
|
||||
|
||||
public LegacyJobLocation(Location location) {
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
public LegacyJobLocation() {
|
||||
this(new Location());
|
||||
}
|
||||
|
||||
public void readExternal(DataInput in) throws IOException {
|
||||
this.jobId = in.readUTF();
|
||||
this.repeat = in.readInt();
|
||||
this.startTime = in.readLong();
|
||||
this.delay = in.readLong();
|
||||
this.nextTime = in.readLong();
|
||||
this.period = in.readLong();
|
||||
this.cronEntry = in.readUTF();
|
||||
this.location.readExternal(in);
|
||||
}
|
||||
|
||||
public void writeExternal(DataOutput out) throws IOException {
|
||||
out.writeUTF(this.jobId);
|
||||
out.writeInt(this.repeat);
|
||||
out.writeLong(this.startTime);
|
||||
out.writeLong(this.delay);
|
||||
out.writeLong(this.nextTime);
|
||||
out.writeLong(this.period);
|
||||
if (this.cronEntry == null) {
|
||||
this.cronEntry = "";
|
||||
}
|
||||
out.writeUTF(this.cronEntry);
|
||||
this.location.writeExternal(out);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the jobId
|
||||
*/
|
||||
public String getJobId() {
|
||||
return this.jobId;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param jobId
|
||||
* the jobId to set
|
||||
*/
|
||||
public void setJobId(String jobId) {
|
||||
this.jobId = jobId;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the repeat
|
||||
*/
|
||||
public int getRepeat() {
|
||||
return this.repeat;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param repeat
|
||||
* the repeat to set
|
||||
*/
|
||||
public void setRepeat(int repeat) {
|
||||
this.repeat = repeat;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the start
|
||||
*/
|
||||
public long getStartTime() {
|
||||
return this.startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param start
|
||||
* the start to set
|
||||
*/
|
||||
public void setStartTime(long start) {
|
||||
this.startTime = start;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the nextTime
|
||||
*/
|
||||
public synchronized long getNextTime() {
|
||||
return this.nextTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param nextTime
|
||||
* the nextTime to set
|
||||
*/
|
||||
public synchronized void setNextTime(long nextTime) {
|
||||
this.nextTime = nextTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the period
|
||||
*/
|
||||
public long getPeriod() {
|
||||
return this.period;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param period
|
||||
* the period to set
|
||||
*/
|
||||
public void setPeriod(long period) {
|
||||
this.period = period;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the cronEntry
|
||||
*/
|
||||
public synchronized String getCronEntry() {
|
||||
return this.cronEntry;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param cronEntry
|
||||
* the cronEntry to set
|
||||
*/
|
||||
public synchronized void setCronEntry(String cronEntry) {
|
||||
this.cronEntry = cronEntry;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if this JobLocation represents a cron entry, false otherwise.
|
||||
*/
|
||||
public boolean isCron() {
|
||||
return getCronEntry() != null && getCronEntry().length() > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the delay
|
||||
*/
|
||||
public long getDelay() {
|
||||
return this.delay;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param delay
|
||||
* the delay to set
|
||||
*/
|
||||
public void setDelay(long delay) {
|
||||
this.delay = delay;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the location
|
||||
*/
|
||||
public Location getLocation() {
|
||||
return this.location;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Job [id=" + jobId + ", startTime=" + new Date(startTime) +
|
||||
", delay=" + delay + ", period=" + period +
|
||||
", repeat=" + repeat + ", nextTime=" + new Date(nextTime) + "]";
|
||||
}
|
||||
|
||||
static class JobLocationMarshaller extends VariableMarshaller<List<LegacyJobLocation>> {
|
||||
static final JobLocationMarshaller INSTANCE = new JobLocationMarshaller();
|
||||
|
||||
@Override
|
||||
public List<LegacyJobLocation> readPayload(DataInput dataIn) throws IOException {
|
||||
List<LegacyJobLocation> result = new ArrayList<LegacyJobLocation>();
|
||||
int size = dataIn.readInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
LegacyJobLocation jobLocation = new LegacyJobLocation();
|
||||
jobLocation.readExternal(dataIn);
|
||||
result.add(jobLocation);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(List<LegacyJobLocation> value, DataOutput dataOut) throws IOException {
|
||||
dataOut.writeInt(value.size());
|
||||
for (LegacyJobLocation jobLocation : value) {
|
||||
jobLocation.writeExternal(dataOut);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
final int prime = 31;
|
||||
int result = 1;
|
||||
result = prime * result + ((cronEntry == null) ? 0 : cronEntry.hashCode());
|
||||
result = prime * result + (int) (delay ^ (delay >>> 32));
|
||||
result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
|
||||
result = prime * result + ((location == null) ? 0 : location.hashCode());
|
||||
result = prime * result + (int) (nextTime ^ (nextTime >>> 32));
|
||||
result = prime * result + (int) (period ^ (period >>> 32));
|
||||
result = prime * result + repeat;
|
||||
result = prime * result + (int) (startTime ^ (startTime >>> 32));
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
LegacyJobLocation other = (LegacyJobLocation) obj;
|
||||
|
||||
if (cronEntry == null) {
|
||||
if (other.cronEntry != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!cronEntry.equals(other.cronEntry)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (delay != other.delay) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (jobId == null) {
|
||||
if (other.jobId != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!jobId.equals(other.jobId)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (location == null) {
|
||||
if (other.location != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!location.equals(other.location)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (nextTime != other.nextTime) {
|
||||
return false;
|
||||
}
|
||||
if (period != other.period) {
|
||||
return false;
|
||||
}
|
||||
if (repeat != other.repeat) {
|
||||
return false;
|
||||
}
|
||||
if (startTime != other.startTime) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,222 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler.legacy;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
|
||||
import org.apache.activemq.store.kahadb.disk.journal.Location;
|
||||
import org.apache.activemq.store.kahadb.disk.page.Transaction;
|
||||
import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
|
||||
import org.apache.activemq.util.ByteSequence;
|
||||
import org.apache.activemq.util.ServiceStopper;
|
||||
import org.apache.activemq.util.ServiceSupport;
|
||||
|
||||
/**
|
||||
* Read-only view of a stored legacy JobScheduler instance.
|
||||
*/
|
||||
final class LegacyJobSchedulerImpl extends ServiceSupport {
|
||||
|
||||
private final LegacyJobSchedulerStoreImpl store;
|
||||
private String name;
|
||||
private BTreeIndex<Long, List<LegacyJobLocation>> index;
|
||||
|
||||
LegacyJobSchedulerImpl(LegacyJobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the next time that a job would be scheduled to run.
|
||||
*
|
||||
* @return time of next scheduled job to run.
|
||||
*
|
||||
* @throws IOException if an error occurs while fetching the time.
|
||||
*/
|
||||
public long getNextScheduleTime() throws IOException {
|
||||
Map.Entry<Long, List<LegacyJobLocation>> first = this.index.getFirst(this.store.getPageFile().tx());
|
||||
return first != null ? first.getKey() : -1L;
|
||||
}
|
||||
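Because the index is ordered by execution time, the next fire time is simply the first key, with -1 as the empty sentinel. The same lookup expressed against a JDK TreeMap, as a sketch (names illustrative):
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
final class NextScheduleTimeSketch {
    static <J> long nextScheduleTime(TreeMap<Long, List<J>> jobsByFireTime) {
        // firstEntry() mirrors BTreeIndex.getFirst(): lowest key, or null when empty.
        Map.Entry<Long, List<J>> first = jobsByFireTime.firstEntry();
        return first != null ? first.getKey() : -1L;
    }
}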
|
||||
/**
|
||||
* Gets the list of the next batch of scheduled jobs in the store.
|
||||
*
|
||||
* @return a list of the next jobs that will run.
|
||||
*
|
||||
* @throws IOException if an error occurs while fetching the jobs list.
|
||||
*/
|
||||
public List<LegacyJobImpl> getNextScheduleJobs() throws IOException {
|
||||
final List<LegacyJobImpl> result = new ArrayList<LegacyJobImpl>();
|
||||
|
||||
this.store.getPageFile().tx().execute(new Transaction.Closure<IOException>() {
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
Map.Entry<Long, List<LegacyJobLocation>> first = index.getFirst(store.getPageFile().tx());
|
||||
if (first != null) {
|
||||
for (LegacyJobLocation jl : first.getValue()) {
|
||||
ByteSequence bs = getPayload(jl.getLocation());
|
||||
LegacyJobImpl job = new LegacyJobImpl(jl, bs);
|
||||
result.add(job);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a list of all scheduled jobs in this store.
|
||||
*
|
||||
* @return a list of all the currently scheduled jobs in this store.
|
||||
*
|
||||
* @throws IOException if an error occurs while fetching the list of jobs.
|
||||
*/
|
||||
public List<LegacyJobImpl> getAllJobs() throws IOException {
|
||||
final List<LegacyJobImpl> result = new ArrayList<LegacyJobImpl>();
|
||||
this.store.getPageFile().tx().execute(new Transaction.Closure<IOException>() {
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
Iterator<Map.Entry<Long, List<LegacyJobLocation>>> iter = index.iterator(store.getPageFile().tx());
|
||||
while (iter.hasNext()) {
|
||||
Map.Entry<Long, List<LegacyJobLocation>> next = iter.next();
|
||||
if (next != null) {
|
||||
for (LegacyJobLocation jl : next.getValue()) {
|
||||
ByteSequence bs = getPayload(jl.getLocation());
|
||||
LegacyJobImpl job = new LegacyJobImpl(jl, bs);
|
||||
result.add(job);
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a list of all scheduled jobs that exist between the given start and end time.
|
||||
*
|
||||
* @param start
|
||||
* The start time to look for scheduled jobs.
|
||||
* @param finish
|
||||
* The end time to stop looking for scheduled jobs.
|
||||
*
|
||||
* @return a list of all scheduled jobs that would run between the given start and end time.
|
||||
*
|
||||
* @throws IOException if an error occurs while fetching the list of jobs.
|
||||
*/
|
||||
public List<LegacyJobImpl> getAllJobs(final long start, final long finish) throws IOException {
|
||||
final List<LegacyJobImpl> result = new ArrayList<LegacyJobImpl>();
|
||||
this.store.getPageFile().tx().execute(new Transaction.Closure<IOException>() {
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
Iterator<Map.Entry<Long, List<LegacyJobLocation>>> iter = index.iterator(store.getPageFile().tx(), start);
|
||||
while (iter.hasNext()) {
|
||||
Map.Entry<Long, List<LegacyJobLocation>> next = iter.next();
|
||||
if (next != null && next.getKey().longValue() <= finish) {
|
||||
for (LegacyJobLocation jl : next.getValue()) {
|
||||
ByteSequence bs = getPayload(jl.getLocation());
|
||||
LegacyJobImpl job = new LegacyJobImpl(jl, bs);
|
||||
result.add(job);
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
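The scan above starts at the first entry at or after start and stops at the first key past finish, so both bounds are inclusive. The equivalent rule with a JDK TreeMap, as a sketch (names illustrative):
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
final class RangeScanSketch {
    static <J> List<J> jobsBetween(TreeMap<Long, List<J>> jobsByFireTime, long start, long finish) {
        List<J> result = new ArrayList<J>();
        // subMap with inclusive bounds matches the index iteration plus the
        // next.getKey() <= finish cutoff used above.
        for (Map.Entry<Long, List<J>> entry : jobsByFireTime.subMap(start, true, finish, true).entrySet()) {
            result.addAll(entry.getValue());
        }
        return result;
    }
}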
|
||||
ByteSequence getPayload(Location location) throws IllegalStateException, IOException {
|
||||
return this.store.getPayload(location);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "LegacyJobScheduler: " + this.name;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() throws Exception {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop(ServiceStopper stopper) throws Exception {
|
||||
}
|
||||
|
||||
void createIndexes(Transaction tx) throws IOException {
|
||||
this.index = new BTreeIndex<Long, List<LegacyJobLocation>>(this.store.getPageFile(), tx.allocate().getPageId());
|
||||
}
|
||||
|
||||
void load(Transaction tx) throws IOException {
|
||||
this.index.setKeyMarshaller(LongMarshaller.INSTANCE);
|
||||
this.index.setValueMarshaller(ValueMarshaller.INSTANCE);
|
||||
this.index.load(tx);
|
||||
}
|
||||
|
||||
void read(DataInput in) throws IOException {
|
||||
this.name = in.readUTF();
|
||||
this.index = new BTreeIndex<Long, List<LegacyJobLocation>>(this.store.getPageFile(), in.readLong());
|
||||
this.index.setKeyMarshaller(LongMarshaller.INSTANCE);
|
||||
this.index.setValueMarshaller(ValueMarshaller.INSTANCE);
|
||||
}
|
||||
|
||||
public void write(DataOutput out) throws IOException {
|
||||
out.writeUTF(name);
|
||||
out.writeLong(this.index.getPageId());
|
||||
}
|
||||
|
||||
static class ValueMarshaller extends VariableMarshaller<List<LegacyJobLocation>> {
|
||||
static final ValueMarshaller INSTANCE = new ValueMarshaller();
|
||||
|
||||
@Override
|
||||
public List<LegacyJobLocation> readPayload(DataInput dataIn) throws IOException {
|
||||
List<LegacyJobLocation> result = new ArrayList<LegacyJobLocation>();
|
||||
int size = dataIn.readInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
LegacyJobLocation jobLocation = new LegacyJobLocation();
|
||||
jobLocation.readExternal(dataIn);
|
||||
result.add(jobLocation);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(List<LegacyJobLocation> value, DataOutput dataOut) throws IOException {
|
||||
dataOut.writeInt(value.size());
|
||||
for (LegacyJobLocation jobLocation : value) {
|
||||
jobLocation.writeExternal(dataOut);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,378 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler.legacy;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
|
||||
import org.apache.activemq.store.kahadb.disk.journal.Journal;
|
||||
import org.apache.activemq.store.kahadb.disk.journal.Location;
|
||||
import org.apache.activemq.store.kahadb.disk.page.Page;
|
||||
import org.apache.activemq.store.kahadb.disk.page.PageFile;
|
||||
import org.apache.activemq.store.kahadb.disk.page.Transaction;
|
||||
import org.apache.activemq.store.kahadb.disk.util.IntegerMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
|
||||
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
|
||||
import org.apache.activemq.util.ByteSequence;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.apache.activemq.util.LockFile;
|
||||
import org.apache.activemq.util.ServiceStopper;
|
||||
import org.apache.activemq.util.ServiceSupport;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Read-only view of a legacy JobSchedulerStore implementation.
|
||||
*/
|
||||
final class LegacyJobSchedulerStoreImpl extends ServiceSupport {
|
||||
|
||||
static final Logger LOG = LoggerFactory.getLogger(LegacyJobSchedulerStoreImpl.class);
|
||||
|
||||
private static final int DATABASE_LOCKED_WAIT_DELAY = 10 * 1000;
|
||||
|
||||
private File directory;
|
||||
private PageFile pageFile;
|
||||
private Journal journal;
|
||||
private LockFile lockFile;
|
||||
private final AtomicLong journalSize = new AtomicLong(0);
|
||||
private boolean failIfDatabaseIsLocked;
|
||||
private int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
|
||||
private int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
|
||||
private boolean enableIndexWriteAsync = false;
|
||||
private MetaData metaData = new MetaData(this);
|
||||
private final MetaDataMarshaller metaDataMarshaller = new MetaDataMarshaller(this);
|
||||
private final Map<String, LegacyJobSchedulerImpl> schedulers = new HashMap<String, LegacyJobSchedulerImpl>();
|
||||
|
||||
protected class MetaData {
|
||||
protected MetaData(LegacyJobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
private final LegacyJobSchedulerStoreImpl store;
|
||||
Page<MetaData> page;
|
||||
BTreeIndex<Integer, Integer> journalRC;
|
||||
BTreeIndex<String, LegacyJobSchedulerImpl> storedSchedulers;
|
||||
|
||||
void createIndexes(Transaction tx) throws IOException {
|
||||
this.storedSchedulers = new BTreeIndex<String, LegacyJobSchedulerImpl>(pageFile, tx.allocate().getPageId());
|
||||
this.journalRC = new BTreeIndex<Integer, Integer>(pageFile, tx.allocate().getPageId());
|
||||
}
|
||||
|
||||
void load(Transaction tx) throws IOException {
|
||||
this.storedSchedulers.setKeyMarshaller(StringMarshaller.INSTANCE);
|
||||
this.storedSchedulers.setValueMarshaller(new JobSchedulerMarshaller(this.store));
|
||||
this.storedSchedulers.load(tx);
|
||||
this.journalRC.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.setValueMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.load(tx);
|
||||
}
|
||||
|
||||
void loadScheduler(Transaction tx, Map<String, LegacyJobSchedulerImpl> schedulers) throws IOException {
|
||||
for (Iterator<Entry<String, LegacyJobSchedulerImpl>> i = this.storedSchedulers.iterator(tx); i.hasNext();) {
|
||||
Entry<String, LegacyJobSchedulerImpl> entry = i.next();
|
||||
entry.getValue().load(tx);
|
||||
schedulers.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
public void read(DataInput is) throws IOException {
|
||||
this.storedSchedulers = new BTreeIndex<String, LegacyJobSchedulerImpl>(pageFile, is.readLong());
|
||||
this.storedSchedulers.setKeyMarshaller(StringMarshaller.INSTANCE);
|
||||
this.storedSchedulers.setValueMarshaller(new JobSchedulerMarshaller(this.store));
|
||||
this.journalRC = new BTreeIndex<Integer, Integer>(pageFile, is.readLong());
|
||||
this.journalRC.setKeyMarshaller(IntegerMarshaller.INSTANCE);
|
||||
this.journalRC.setValueMarshaller(IntegerMarshaller.INSTANCE);
|
||||
}
|
||||
|
||||
public void write(DataOutput os) throws IOException {
|
||||
os.writeLong(this.storedSchedulers.getPageId());
|
||||
os.writeLong(this.journalRC.getPageId());
|
||||
}
|
||||
}
|
||||
|
||||
class MetaDataMarshaller extends VariableMarshaller<MetaData> {
|
||||
private final LegacyJobSchedulerStoreImpl store;
|
||||
|
||||
MetaDataMarshaller(LegacyJobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
@Override
|
||||
public MetaData readPayload(DataInput dataIn) throws IOException {
|
||||
MetaData rc = new MetaData(this.store);
|
||||
rc.read(dataIn);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(MetaData object, DataOutput dataOut) throws IOException {
|
||||
object.write(dataOut);
|
||||
}
|
||||
}
|
||||
|
||||
class ValueMarshaller extends VariableMarshaller<List<LegacyJobLocation>> {
|
||||
@Override
|
||||
public List<LegacyJobLocation> readPayload(DataInput dataIn) throws IOException {
|
||||
List<LegacyJobLocation> result = new ArrayList<LegacyJobLocation>();
|
||||
int size = dataIn.readInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
LegacyJobLocation jobLocation = new LegacyJobLocation();
|
||||
jobLocation.readExternal(dataIn);
|
||||
result.add(jobLocation);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(List<LegacyJobLocation> value, DataOutput dataOut) throws IOException {
|
||||
dataOut.writeInt(value.size());
|
||||
for (LegacyJobLocation jobLocation : value) {
|
||||
jobLocation.writeExternal(dataOut);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class JobSchedulerMarshaller extends VariableMarshaller<LegacyJobSchedulerImpl> {
|
||||
private final LegacyJobSchedulerStoreImpl store;
|
||||
|
||||
JobSchedulerMarshaller(LegacyJobSchedulerStoreImpl store) {
|
||||
this.store = store;
|
||||
}
|
||||
|
||||
@Override
|
||||
public LegacyJobSchedulerImpl readPayload(DataInput dataIn) throws IOException {
|
||||
LegacyJobSchedulerImpl result = new LegacyJobSchedulerImpl(this.store);
|
||||
result.read(dataIn);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writePayload(LegacyJobSchedulerImpl js, DataOutput dataOut) throws IOException {
|
||||
js.write(dataOut);
|
||||
}
|
||||
}
|
||||
|
||||
public File getDirectory() {
|
||||
return directory;
|
||||
}
|
||||
|
||||
public void setDirectory(File directory) {
|
||||
this.directory = directory;
|
||||
}
|
||||
|
||||
public long size() {
|
||||
if (!isStarted()) {
|
||||
return 0;
|
||||
}
|
||||
try {
|
||||
return journalSize.get() + pageFile.getDiskSize();
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the named Job Scheduler if it exists, otherwise throws an exception.
|
||||
*
|
||||
* @param name
|
||||
* The name of the scheduler that is to be returned.
|
||||
*
|
||||
* @return the named scheduler if it exists.
|
||||
*
|
||||
* @throws Exception if the named scheduler does not exist in this store.
|
||||
*/
|
||||
public LegacyJobSchedulerImpl getJobScheduler(final String name) throws Exception {
|
||||
LegacyJobSchedulerImpl result = this.schedulers.get(name);
|
||||
if (result == null) {
|
||||
throw new NoSuchElementException("No such Job Scheduler in this store: " + name);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the names of all the schedulers that exist in this scheduler store.
|
||||
*
|
||||
* @return a set of names of all scheduler instances in this store.
|
||||
*
|
||||
* @throws Exception if an error occurs while collecting the scheduler names.
|
||||
*/
|
||||
public Set<String> getJobSchedulerNames() throws Exception {
|
||||
if (!schedulers.isEmpty()) {
|
||||
return this.schedulers.keySet();
|
||||
}
|
||||
return Collections.emptySet();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() throws Exception {
|
||||
if (this.directory == null) {
|
||||
this.directory = new File(IOHelper.getDefaultDataDirectory() + File.separator + "delayedDB");
|
||||
}
|
||||
IOHelper.mkdirs(this.directory);
|
||||
lock();
|
||||
this.journal = new Journal();
|
||||
this.journal.setDirectory(directory);
|
||||
this.journal.setMaxFileLength(getJournalMaxFileLength());
|
||||
this.journal.setWriteBatchSize(getJournalMaxWriteBatchSize());
|
||||
this.journal.setSizeAccumulator(this.journalSize);
|
||||
this.journal.start();
|
||||
this.pageFile = new PageFile(directory, "scheduleDB");
|
||||
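// Index writes are batched one page at a time.
|
||||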
this.pageFile.setWriteBatchSize(1);
|
||||
this.pageFile.load();
|
||||
|
||||
this.pageFile.tx().execute(new Transaction.Closure<IOException>() {
|
||||
@Override
|
||||
public void execute(Transaction tx) throws IOException {
|
||||
if (pageFile.getPageCount() == 0) {
|
||||
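// New store: reserve page 0 for the MetaData record, then create the empty indexes.
|
||||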
Page<MetaData> page = tx.allocate();
|
||||
assert page.getPageId() == 0;
|
||||
page.set(metaData);
|
||||
metaData.page = page;
|
||||
metaData.createIndexes(tx);
|
||||
tx.store(metaData.page, metaDataMarshaller, true);
|
||||
|
||||
} else {
|
||||
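// Existing store: page 0 always holds the MetaData, which carries the two index page ids.
|
||||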
Page<MetaData> page = tx.load(0, metaDataMarshaller);
|
||||
metaData = page.get();
|
||||
metaData.page = page;
|
||||
}
|
||||
metaData.load(tx);
|
||||
metaData.loadScheduler(tx, schedulers);
|
||||
for (LegacyJobSchedulerImpl js : schedulers.values()) {
|
||||
try {
|
||||
js.start();
|
||||
} catch (Exception e) {
|
||||
LegacyJobSchedulerStoreImpl.LOG.error("Failed to load " + js.getName(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
this.pageFile.flush();
|
||||
LOG.info(this + " started");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop(ServiceStopper stopper) throws Exception {
|
||||
for (LegacyJobSchedulerImpl js : this.schedulers.values()) {
|
||||
js.stop();
|
||||
}
|
||||
if (this.pageFile != null) {
|
||||
this.pageFile.unload();
|
||||
}
|
||||
if (this.journal != null) {
|
||||
journal.close();
|
||||
}
|
||||
if (this.lockFile != null) {
|
||||
this.lockFile.unlock();
|
||||
}
|
||||
this.lockFile = null;
|
||||
LOG.info(this + " stopped");
|
||||
}
|
||||
|
||||
ByteSequence getPayload(Location location) throws IllegalStateException, IOException {
|
||||
return this.journal.read(location);
|
||||
}
|
||||
|
||||
Location write(ByteSequence payload, boolean sync) throws IllegalStateException, IOException {
|
||||
return this.journal.write(payload, sync);
|
||||
}
|
||||
|
||||
private void lock() throws IOException {
|
||||
if (lockFile == null) {
|
||||
File lockFileName = new File(directory, "lock");
|
||||
lockFile = new LockFile(lockFileName, true);
|
||||
if (failIfDatabaseIsLocked) {
|
||||
lockFile.lock();
|
||||
} else {
|
||||
while (true) {
|
||||
try {
|
||||
lockFile.lock();
|
||||
break;
|
||||
} catch (IOException e) {
|
||||
LOG.info("Database " + lockFileName + " is locked... waiting " + (DATABASE_LOCKED_WAIT_DELAY / 1000)
|
||||
+ " seconds for the database to be unlocked. Reason: " + e);
|
||||
try {
|
||||
Thread.sleep(DATABASE_LOCKED_WAIT_DELAY);
|
||||
} catch (InterruptedException e1) {
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PageFile getPageFile() {
|
||||
this.pageFile.isLoaded();
|
||||
return this.pageFile;
|
||||
}
|
||||
|
||||
public boolean isFailIfDatabaseIsLocked() {
|
||||
return failIfDatabaseIsLocked;
|
||||
}
|
||||
|
||||
public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
|
||||
this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
|
||||
}
|
||||
|
||||
public int getJournalMaxFileLength() {
|
||||
return journalMaxFileLength;
|
||||
}
|
||||
|
||||
public void setJournalMaxFileLength(int journalMaxFileLength) {
|
||||
this.journalMaxFileLength = journalMaxFileLength;
|
||||
}
|
||||
|
||||
public int getJournalMaxWriteBatchSize() {
|
||||
return journalMaxWriteBatchSize;
|
||||
}
|
||||
|
||||
public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
|
||||
this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
|
||||
}
|
||||
|
||||
public boolean isEnableIndexWriteAsync() {
|
||||
return enableIndexWriteAsync;
|
||||
}
|
||||
|
||||
public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
|
||||
this.enableIndexWriteAsync = enableIndexWriteAsync;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "LegacyJobSchedulerStore:" + this.directory;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,155 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.activemq.store.kahadb.scheduler.legacy;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.activemq.store.kahadb.data.KahaAddScheduledJobCommand;
|
||||
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Used to upgrade a legacy Job Scheduler store to the latest version. This class
|
||||
* loads a discovered legacy scheduler store and generates new add commands for all
|
||||
* jobs currently in the store.
|
||||
*/
|
||||
public class LegacyStoreReplayer {
|
||||
|
||||
static final Logger LOG = LoggerFactory.getLogger(LegacyStoreReplayer.class);
|
||||
|
||||
private LegacyJobSchedulerStoreImpl store;
|
||||
private final File legacyStoreDirectory;
|
||||
|
||||
/**
|
||||
* Creates a new Legacy Store Replayer that reads from the given legacy store directory.
|
||||
*
|
||||
* @param directory the directory where the legacy scheduler store files are located.
|
||||
*/
|
||||
public LegacyStoreReplayer(File directory) {
|
||||
this.legacyStoreDirectory = directory;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the legacy store and prepares it for replay into a newer Store instance.
|
||||
*
|
||||
* @throws IOException if an error occurs while reading in the legacy store.
|
||||
*/
|
||||
public void load() throws IOException {
|
||||
|
||||
store = new LegacyJobSchedulerStoreImpl();
|
||||
store.setDirectory(legacyStoreDirectory);
|
||||
store.setFailIfDatabaseIsLocked(true);
|
||||
|
||||
try {
|
||||
store.start();
|
||||
} catch (IOException ioe) {
|
||||
LOG.warn("Legacy store load failed: ", ioe);
|
||||
throw ioe;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Legacy store load failed: ", e);
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Unloads a previously loaded legacy store to release any resources associated with it.
|
||||
*
|
||||
* Once a store is unloaded it cannot be replayed again until it has been reloaded.
|
||||
* @throws IOException
|
||||
*/
|
||||
public void unload() throws IOException {
|
||||
|
||||
if (store != null) {
|
||||
try {
|
||||
store.stop();
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Legacy store unload failed: ", e);
|
||||
throw new IOException(e);
|
||||
} finally {
|
||||
store = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs a replay of scheduled jobs into the target JobSchedulerStore.
|
||||
*
|
||||
* @param targetStore
|
||||
* The JobSchedulerStore that will receive the replay events from the legacy store.
|
||||
*
|
||||
* @throws IOException if an error occurs during replay of the legacy store.
|
||||
*/
|
||||
public void startReplay(JobSchedulerStoreImpl targetStore) throws IOException {
|
||||
checkLoaded();
|
||||
|
||||
if (targetStore == null) {
|
||||
throw new IOException("Cannot replay to a null store");
|
||||
}
|
||||
|
||||
try {
|
||||
Set<String> schedulers = store.getJobSchedulerNames();
|
||||
if (!schedulers.isEmpty()) {
|
||||
|
||||
for (String name : schedulers) {
|
||||
LegacyJobSchedulerImpl scheduler = store.getJobScheduler(name);
|
||||
LOG.info("Replay of legacy store {} starting.", name);
|
||||
replayScheduler(scheduler, targetStore);
|
||||
}
|
||||
}
|
||||
|
||||
LOG.info("Replay of legacy store complate.");
|
||||
} catch (IOException ioe) {
|
||||
LOG.warn("Failed during replay of legacy store: ", ioe);
|
||||
throw ioe;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Failed during replay of legacy store: ", e);
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private final void replayScheduler(LegacyJobSchedulerImpl legacy, JobSchedulerStoreImpl target) throws Exception {
|
||||
List<LegacyJobImpl> jobs = legacy.getAllJobs();
|
||||
|
||||
String schedulerName = legacy.getName();
|
||||
|
||||
for (LegacyJobImpl job : jobs) {
|
||||
LOG.trace("Storing job from legacy store to new store: {}", job);
|
||||
KahaAddScheduledJobCommand newJob = new KahaAddScheduledJobCommand();
|
||||
newJob.setScheduler(schedulerName);
|
||||
newJob.setJobId(job.getJobId());
|
||||
newJob.setStartTime(job.getStartTime());
|
||||
newJob.setCronEntry(job.getCronEntry());
|
||||
newJob.setDelay(job.getDelay());
|
||||
newJob.setPeriod(job.getPeriod());
|
||||
newJob.setRepeat(job.getRepeat());
|
||||
newJob.setNextExecutionTime(job.getNextExecutionTime());
|
||||
newJob.setPayload(job.getPayload());
|
||||
|
||||
target.store(newJob);
|
||||
}
|
||||
}
|
||||
|
||||
private final void checkLoaded() throws IOException {
|
||||
if (this.store == null) {
|
||||
throw new IOException("Cannot replay until legacy store is loaded.");
|
||||
}
|
||||
}
|
||||
}
|
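
Taken together, the replayer is a three step affair: load() opens the legacy store read-only and takes its file lock, startReplay() walks every named scheduler and re-journals each job into the target store, and unload() stops the legacy store and releases the lock. A minimal usage sketch, assuming a hypothetical legacyDir File and an already started JobSchedulerStoreImpl named target:

    LegacyStoreReplayer replayer = new LegacyStoreReplayer(legacyDir); // legacyDir is hypothetical
    replayer.load();                  // start the read-only legacy store and acquire its lock
    try {
        replayer.startReplay(target); // each legacy job becomes a KahaAddScheduledJobCommand
    } finally {
        replayer.unload();            // always stop the legacy store and release the lock
    }
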
|
@@ -32,6 +32,11 @@ enum KahaEntryType {
|
|||
KAHA_PRODUCER_AUDIT_COMMAND = 8;
|
||||
KAHA_ACK_MESSAGE_FILE_MAP_COMMAND = 9;
|
||||
KAHA_UPDATE_MESSAGE_COMMAND = 10;
|
||||
KAHA_ADD_SCHEDULED_JOB_COMMAND = 11;
|
||||
KAHA_RESCHEDULE_JOB_COMMAND = 12;
|
||||
KAHA_REMOVE_SCHEDULED_JOB_COMMAND = 13;
|
||||
KAHA_REMOVE_SCHEDULED_JOBS_COMMAND = 14;
|
||||
KAHA_DESTROY_SCHEDULER_COMMAND = 15;
|
||||
}
|
||||
|
||||
message KahaTraceCommand {
|
||||
|
@@ -179,6 +184,62 @@ message KahaLocation {
|
|||
required int32 offset = 2;
|
||||
}
|
||||
|
||||
message KahaAddScheduledJobCommand {
|
||||
//| option java_implments = "org.apache.activemq.store.kahadb.JournalCommand<KahaAddScheduledJobCommand>";
|
||||
//| option java_visitor = "org.apache.activemq.store.kahadb.Visitor:void:java.io.IOException";
|
||||
//| option java_type_method = "KahaEntryType";
|
||||
|
||||
required string scheduler=1;
|
||||
required string job_id=2;
|
||||
required int64 start_time=3;
|
||||
required string cron_entry=4;
|
||||
required int64 delay=5;
|
||||
required int64 period=6;
|
||||
required int32 repeat=7;
|
||||
required bytes payload=8;
|
||||
required int64 next_execution_time=9;
|
||||
}
|
||||
|
||||
message KahaRescheduleJobCommand {
|
||||
//| option java_implments = "org.apache.activemq.store.kahadb.JournalCommand<KahaRescheduleJobCommand>";
|
||||
//| option java_visitor = "org.apache.activemq.store.kahadb.Visitor:void:java.io.IOException";
|
||||
//| option java_type_method = "KahaEntryType";
|
||||
|
||||
required string scheduler=1;
|
||||
required string job_id=2;
|
||||
required int64 execution_time=3;
|
||||
required int64 next_execution_time=4;
|
||||
required int32 rescheduled_count=5;
|
||||
}
|
||||
|
||||
message KahaRemoveScheduledJobCommand {
|
||||
//| option java_implments = "org.apache.activemq.store.kahadb.JournalCommand<KahaRemoveScheduledJobCommand>";
|
||||
//| option java_visitor = "org.apache.activemq.store.kahadb.Visitor:void:java.io.IOException";
|
||||
//| option java_type_method = "KahaEntryType";
|
||||
|
||||
required string scheduler=1;
|
||||
required string job_id=2;
|
||||
required int64 next_execution_time=3;
|
||||
}
|
||||
|
||||
message KahaRemoveScheduledJobsCommand {
|
||||
//| option java_implments = "org.apache.activemq.store.kahadb.JournalCommand<KahaRemoveScheduledJobsCommand>";
|
||||
//| option java_visitor = "org.apache.activemq.store.kahadb.Visitor:void:java.io.IOException";
|
||||
//| option java_type_method = "KahaEntryType";
|
||||
|
||||
required string scheduler=1;
|
||||
required int64 start_time=2;
|
||||
required int64 end_time=3;
|
||||
}
|
||||
|
||||
message KahaDestroySchedulerCommand {
|
||||
//| option java_implments = "org.apache.activemq.store.kahadb.JournalCommand<KahaDestroySchedulerCommand>";
|
||||
//| option java_visitor = "org.apache.activemq.store.kahadb.Visitor:void:java.io.IOException";
|
||||
//| option java_type_method = "KahaEntryType";
|
||||
|
||||
required string scheduler=1;
|
||||
}
|
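
The activemq-protobuf compiler turns each of these messages into a Java command class with bean-style setters; the //| option comments wire in the JournalCommand interface, the visitor hook, and the KahaEntryType mapping. As a sketch of the resulting API, here is a hand-built add command, assuming a started JobSchedulerStoreImpl named store and the setter names used by LegacyStoreReplayer above (the required payload, field 8, is omitted for brevity):

    KahaAddScheduledJobCommand add = new KahaAddScheduledJobCommand();
    add.setScheduler("JMS");                      // field 1: target scheduler name
    add.setJobId("Job-1");                        // field 2: unique job id
    add.setStartTime(System.currentTimeMillis()); // field 3
    add.setCronEntry("");                         // field 4: empty when not cron driven
    add.setDelay(60 * 1000);                      // field 5: fire once, a minute from now
    add.setPeriod(0);                             // field 6: no repeat period
    add.setRepeat(0);                             // field 7: no repeats
    add.setNextExecutionTime(System.currentTimeMillis() + 60 * 1000); // field 9
    store.store(add);                             // the store journals the command
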
||||
|
||||
// TODO things to ponder
|
||||
// should we move more message fields
|
||||
// that are set by the sender (and rarely required by the broker
|
||||
|
|
|
@@ -35,6 +35,7 @@ import org.apache.activemq.leveldb.util.Log
|
|||
import org.apache.activemq.store.PList.PListIterator
|
||||
import org.fusesource.hawtbuf.{UTF8Buffer, DataByteArrayOutputStream}
|
||||
import org.fusesource.hawtdispatch;
|
||||
import org.apache.activemq.broker.scheduler.JobSchedulerStore;
|
||||
|
||||
object LevelDBStore extends Log {
|
||||
val DEFAULT_DIRECTORY = new File("LevelDB");
|
||||
|
@@ -602,6 +603,10 @@ class LevelDBStore extends LockableServiceSupport with BrokerServiceAware with P
|
|||
rc
|
||||
}
|
||||
|
||||
def createJobSchedulerStore():JobSchedulerStore = {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
def removeTopicMessageStore(destination: ActiveMQTopic): Unit = {
|
||||
topics.remove(destination).foreach { store=>
|
||||
store.subscriptions.values.foreach { sub =>
|
||||
|
|
|
@@ -25,6 +25,7 @@ import java.io.File
|
|||
import java.io.IOException
|
||||
import java.util.Set
|
||||
import org.apache.activemq.util.{ServiceStopper, ServiceSupport}
|
||||
import org.apache.activemq.broker.scheduler.JobSchedulerStore
|
||||
|
||||
/**
|
||||
*/
|
||||
|
@@ -44,6 +45,10 @@ abstract class ProxyLevelDBStore extends LockableServiceSupport with BrokerServi
|
|||
return proxy_target.createTopicMessageStore(destination)
|
||||
}
|
||||
|
||||
def createJobSchedulerStore():JobSchedulerStore = {
|
||||
return proxy_target.createJobSchedulerStore()
|
||||
}
|
||||
|
||||
def setDirectory(dir: File) {
|
||||
proxy_target.setDirectory(dir)
|
||||
}
|
||||
|
|
|
@@ -39,6 +39,7 @@ public class JobSchedulerBrokerShutdownTest extends EmbeddedBrokerTestSupport {
|
|||
|
||||
BrokerService broker = super.createBroker();
|
||||
broker.setSchedulerSupport(true);
|
||||
broker.setDataDirectory("target");
|
||||
broker.setSchedulerDirectoryFile(schedulerDirectory);
|
||||
broker.getSystemUsage().getStoreUsage().setLimit(1 * 512);
|
||||
broker.deleteAllMessages();
|
||||
|
|
|
@@ -0,0 +1,155 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import javax.jms.Connection;
|
||||
import javax.jms.MessageProducer;
|
||||
import javax.jms.Session;
|
||||
import javax.jms.TextMessage;
|
||||
import javax.management.openmbean.TabularData;
|
||||
|
||||
import org.apache.activemq.ScheduledMessage;
|
||||
import org.apache.activemq.broker.jmx.JobSchedulerViewMBean;
|
||||
import org.apache.activemq.util.Wait;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Tests of the JMX JobSchedulerStore management MBean.
|
||||
*/
|
||||
public class JobSchedulerJmxManagementTests extends JobSchedulerTestSupport {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(JobSchedulerJmxManagementTests.class);
|
||||
|
||||
@Test
|
||||
public void testJobSchedulerMBeanIsRegistered() throws Exception {
|
||||
JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetNumberOfJobs() throws Exception {
|
||||
JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
scheduleMessage(60000, -1, -1);
|
||||
assertFalse(view.getAllJobs().isEmpty());
|
||||
assertEquals(1, view.getAllJobs().size());
|
||||
scheduleMessage(60000, -1, -1);
|
||||
assertEquals(2, view.getAllJobs().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemvoeJob() throws Exception {
|
||||
JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
scheduleMessage(60000, -1, -1);
|
||||
assertFalse(view.getAllJobs().isEmpty());
|
||||
TabularData jobs = view.getAllJobs();
|
||||
assertEquals(1, jobs.size());
|
||||
for (Object key : jobs.keySet()) {
|
||||
String jobId = ((List<?>)key).get(0).toString();
|
||||
LOG.info("Attempting to remove Job: {}", jobId);
|
||||
view.removeJob(jobId);
|
||||
}
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemoveJobInRange() throws Exception {
|
||||
JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
scheduleMessage(60000, -1, -1);
|
||||
assertFalse(view.getAllJobs().isEmpty());
|
||||
String now = JobSupport.getDateTime(System.currentTimeMillis());
|
||||
String later = JobSupport.getDateTime(System.currentTimeMillis() + 120 * 1000);
|
||||
view.removeAllJobs(now, later);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetNextScheduledJob() throws Exception {
|
||||
JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
scheduleMessage(60000, -1, -1);
|
||||
assertFalse(view.getAllJobs().isEmpty());
|
||||
long before = System.currentTimeMillis() + 57 * 1000;
|
||||
long tooLate = System.currentTimeMillis() + 63 * 1000;
|
||||
String next = view.getNextScheduleTime();
|
||||
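// JobSupport.getDataTime(String) parses the formatted date string back into milliseconds.
|
||||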
long nextTime = JobSupport.getDataTime(next);
|
||||
LOG.info("Next Scheduled Time: {}", next);
|
||||
assertTrue(nextTime > before);
|
||||
assertTrue(nextTime < tooLate);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetExecutionCount() throws Exception {
|
||||
final JobSchedulerViewMBean view = getJobSchedulerMBean();
|
||||
assertNotNull(view);
|
||||
assertTrue(view.getAllJobs().isEmpty());
|
||||
scheduleMessage(10000, 1000, 10);
|
||||
assertFalse(view.getAllJobs().isEmpty());
|
||||
TabularData jobs = view.getAllJobs();
|
||||
assertEquals(1, jobs.size());
|
||||
String jobId = null;
|
||||
for (Object key : jobs.keySet()) {
|
||||
jobId = ((List<?>)key).get(0).toString();
|
||||
}
|
||||
|
||||
final String fixedJobId = jobId;
|
||||
LOG.info("Attempting to get execution count for Job: {}", jobId);
|
||||
assertEquals(0, view.getExecutionCount(jobId));
|
||||
|
||||
assertTrue("Should execute again", Wait.waitFor(new Wait.Condition() {
|
||||
|
||||
@Override
|
||||
public boolean isSatisified() throws Exception {
|
||||
return view.getExecutionCount(fixedJobId) > 0;
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean isUseJmx() {
|
||||
return true;
|
||||
}
|
||||
|
||||
protected void scheduleMessage(int time, int period, int repeat) throws Exception {
|
||||
Connection connection = createConnection();
|
||||
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
|
||||
MessageProducer producer = session.createProducer(destination);
|
||||
TextMessage message = session.createTextMessage("test msg");
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, time);
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_PERIOD, period);
|
||||
message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, repeat);
|
||||
producer.send(message);
|
||||
connection.close();
|
||||
}
|
||||
}
|
|
@@ -16,7 +16,11 @@
|
|||
*/
|
||||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import java.io.File;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
|
@@ -29,18 +33,17 @@ import javax.jms.MessageProducer;
|
|||
import javax.jms.Session;
|
||||
import javax.jms.TextMessage;
|
||||
|
||||
import org.apache.activemq.EmbeddedBrokerTestSupport;
|
||||
import org.apache.activemq.ScheduledMessage;
|
||||
import org.apache.activemq.broker.BrokerService;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.apache.activemq.util.IdGenerator;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
||||
public class JobSchedulerManagementTest extends JobSchedulerTestSupport {
|
||||
|
||||
private static final transient Logger LOG = LoggerFactory.getLogger(JobSchedulerManagementTest.class);
|
||||
|
||||
@Test
|
||||
public void testRemoveAllScheduled() throws Exception {
|
||||
final int COUNT = 5;
|
||||
Connection connection = createConnection();
|
||||
|
@@ -77,6 +80,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
assertEquals(latch.getCount(), COUNT);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemoveAllScheduledAtTime() throws Exception {
|
||||
final int COUNT = 3;
|
||||
Connection connection = createConnection();
|
||||
|
@@ -122,8 +126,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
// Send the remove request
|
||||
MessageProducer producer = session.createProducer(management);
|
||||
Message request = session.createMessage();
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION,
|
||||
ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_START_TIME, Long.toString(start));
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_END_TIME, Long.toString(end));
|
||||
producer.send(request);
|
||||
|
@@ -143,6 +146,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
assertEquals(2, latch.getCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBrowseAllScheduled() throws Exception {
|
||||
final int COUNT = 10;
|
||||
Connection connection = createConnection();
|
||||
|
@@ -191,7 +195,8 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
Thread.sleep(2000);
|
||||
assertEquals(latch.getCount(), COUNT);
|
||||
|
||||
// now see if we got all the scheduled messages on the browse destination.
|
||||
// now see if we got all the scheduled messages on the browse
|
||||
// destination.
|
||||
latch.await(10, TimeUnit.SECONDS);
|
||||
assertEquals(browsedLatch.getCount(), 0);
|
||||
|
||||
|
@@ -200,6 +205,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
assertEquals(latch.getCount(), 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBrowseWindowScheduled() throws Exception {
|
||||
final int COUNT = 10;
|
||||
Connection connection = createConnection();
|
||||
|
@@ -255,15 +261,18 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
Thread.sleep(2000);
|
||||
assertEquals(COUNT + 2, latch.getCount());
|
||||
|
||||
// now see if we got all the scheduled messages on the browse destination.
|
||||
// now see if we got all the scheduled messages on the browse
|
||||
// destination.
|
||||
latch.await(15, TimeUnit.SECONDS);
|
||||
assertEquals(0, browsedLatch.getCount());
|
||||
|
||||
// now see if we got all the scheduled messages on the browse destination.
|
||||
// now see if we got all the scheduled messages on the browse
|
||||
// destination.
|
||||
latch.await(20, TimeUnit.SECONDS);
|
||||
assertEquals(0, latch.getCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemoveScheduled() throws Exception {
|
||||
final int COUNT = 10;
|
||||
Connection connection = createConnection();
|
||||
|
@@ -297,8 +306,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
|
||||
// Send the browse request
|
||||
Message request = session.createMessage();
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION,
|
||||
ScheduledMessage.AMQ_SCHEDULER_ACTION_BROWSE);
|
||||
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_BROWSE);
|
||||
request.setJMSReplyTo(browseDest);
|
||||
producer.send(request);
|
||||
|
||||
|
@@ -307,14 +315,12 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
Message message = browser.receive(2000);
|
||||
assertNotNull(message);
|
||||
|
||||
try{
|
||||
try {
|
||||
Message remove = session.createMessage();
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION,
|
||||
ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVE);
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULED_ID,
|
||||
message.getStringProperty(ScheduledMessage.AMQ_SCHEDULED_ID));
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVE);
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULED_ID, message.getStringProperty(ScheduledMessage.AMQ_SCHEDULED_ID));
|
||||
producer.send(remove);
|
||||
} catch(Exception e) {
|
||||
} catch (Exception e) {
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -323,6 +329,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
assertEquals(COUNT, latch.getCount());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemoveNotScheduled() throws Exception {
|
||||
Connection connection = createConnection();
|
||||
|
||||
|
@@ -333,19 +340,19 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
|
||||
MessageProducer producer = session.createProducer(management);
|
||||
|
||||
try{
|
||||
try {
|
||||
|
||||
// Send the remove request
|
||||
Message remove = session.createMessage();
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION,
|
||||
ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
|
||||
remove.setStringProperty(ScheduledMessage.AMQ_SCHEDULED_ID, new IdGenerator().generateId());
|
||||
producer.send(remove);
|
||||
} catch(Exception e) {
|
||||
} catch (Exception e) {
|
||||
fail("Caught unexpected exception during remove of unscheduled message.");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBrowseWithSelector() throws Exception {
|
||||
Connection connection = createConnection();
|
||||
|
||||
|
@@ -362,7 +369,7 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
Destination browseDest = session.createTemporaryTopic();
|
||||
|
||||
// Create the "Browser"
|
||||
MessageConsumer browser = session.createConsumer(browseDest, ScheduledMessage.AMQ_SCHEDULED_DELAY + " = 45000" );
|
||||
MessageConsumer browser = session.createConsumer(browseDest, ScheduledMessage.AMQ_SCHEDULED_DELAY + " = 45000");
|
||||
|
||||
connection.start();
|
||||
|
||||
|
@@ -383,7 +390,6 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
assertNull(message);
|
||||
}
|
||||
|
||||
|
||||
protected void scheduleMessage(Connection connection, long delay) throws Exception {
|
||||
scheduleMessage(connection, delay, 1);
|
||||
}
|
||||
|
@@ -394,38 +400,10 @@ public class JobSchedulerManagementTest extends EmbeddedBrokerTestSupport {
|
|||
TextMessage message = session.createTextMessage("test msg");
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, delay);
|
||||
|
||||
for(int i = 0; i < count; ++i ) {
|
||||
for (int i = 0; i < count; ++i) {
|
||||
producer.send(message);
|
||||
}
|
||||
|
||||
producer.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
bindAddress = "vm://localhost";
|
||||
super.setUp();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BrokerService createBroker() throws Exception {
|
||||
return createBroker(true);
|
||||
}
|
||||
|
||||
protected BrokerService createBroker(boolean delete) throws Exception {
|
||||
File schedulerDirectory = new File("target/scheduler");
|
||||
if (delete) {
|
||||
IOHelper.mkdirs(schedulerDirectory);
|
||||
IOHelper.deleteChildren(schedulerDirectory);
|
||||
}
|
||||
BrokerService answer = new BrokerService();
|
||||
answer.setPersistent(true);
|
||||
answer.setDeleteAllMessagesOnStartup(true);
|
||||
answer.setDataDirectory("target");
|
||||
answer.setSchedulerDirectoryFile(schedulerDirectory);
|
||||
answer.setSchedulerSupport(true);
|
||||
answer.setUseJmx(false);
|
||||
answer.addConnector(bindAddress);
|
||||
return answer;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,125 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
|
||||
import org.apache.activemq.util.ByteSequence;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.apache.activemq.util.Wait;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class JobSchedulerStoreCheckpointTest {
|
||||
|
||||
static final Logger LOG = LoggerFactory.getLogger(JobSchedulerStoreCheckpointTest.class);
|
||||
|
||||
private JobSchedulerStoreImpl store;
|
||||
private JobScheduler scheduler;
|
||||
private ByteSequence payload;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
File directory = new File("target/test/ScheduledJobsDB");
|
||||
IOHelper.mkdirs(directory);
|
||||
IOHelper.deleteChildren(directory);
|
||||
startStore(directory);
|
||||
|
||||
byte[] data = new byte[8192];
|
||||
for (int i = 0; i < data.length; ++i) {
|
||||
data[i] = (byte) (i % 256);
|
||||
}
|
||||
|
||||
payload = new ByteSequence(data);
|
||||
}
|
||||
|
||||
protected void startStore(File directory) throws Exception {
|
||||
store = new JobSchedulerStoreImpl();
|
||||
store.setDirectory(directory);
|
||||
store.setCheckpointInterval(5000);
|
||||
store.setCleanupInterval(10000);
|
||||
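// A tiny (10KB) max journal file length forces frequent log rollover so cleanup can be observed.
|
||||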
store.setJournalMaxFileLength(10 * 1024);
|
||||
store.start();
|
||||
scheduler = store.getJobScheduler("test");
|
||||
scheduler.startDispatching();
|
||||
}
|
||||
|
||||
private int getNumJournalFiles() throws IOException {
|
||||
return store.getJournal().getFileMap().size();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
scheduler.stopDispatching();
|
||||
store.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test() throws Exception {
|
||||
final int COUNT = 10;
|
||||
final CountDownLatch latch = new CountDownLatch(COUNT);
|
||||
scheduler.addListener(new JobListener() {
|
||||
@Override
|
||||
public void scheduledJob(String id, ByteSequence job) {
|
||||
latch.countDown();
|
||||
}
|
||||
});
|
||||
|
||||
long time = TimeUnit.SECONDS.toMillis(30);
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
scheduler.schedule("id" + i, payload, "", time, 0, 0);
|
||||
}
|
||||
|
||||
int size = scheduler.getAllJobs().size();
|
||||
assertEquals(size, COUNT);
|
||||
|
||||
LOG.info("Number of journal log files: {}", getNumJournalFiles());
|
||||
// need a little slack so go over 60 seconds
|
||||
assertTrue(latch.await(70, TimeUnit.SECONDS));
|
||||
assertEquals(0, latch.getCount());
|
||||
|
||||
for (int i = 0; i < COUNT; i++) {
|
||||
scheduler.schedule("id" + i, payload, "", time, 0, 0);
|
||||
}
|
||||
|
||||
LOG.info("Number of journal log files: {}", getNumJournalFiles());
|
||||
// need a little slack so go over 60 seconds
|
||||
assertTrue(latch.await(70, TimeUnit.SECONDS));
|
||||
assertEquals(0, latch.getCount());
|
||||
|
||||
assertTrue("Should be only one log left: " + getNumJournalFiles(), Wait.waitFor(new Wait.Condition() {
|
||||
|
||||
@Override
|
||||
public boolean isSatisified() throws Exception {
|
||||
return getNumJournalFiles() == 1;
|
||||
}
|
||||
}, TimeUnit.MINUTES.toMillis(2)));
|
||||
|
||||
LOG.info("Number of journal log files: {}", getNumJournalFiles());
|
||||
}
|
||||
}
|
|
@@ -16,50 +16,62 @@
|
|||
*/
|
||||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
|
||||
import org.apache.activemq.util.ByteSequence;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class JobSchedulerStoreTest extends TestCase {
|
||||
public class JobSchedulerStoreTest {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(JobSchedulerStoreTest.class);
|
||||
|
||||
@Test(timeout = 120 * 1000)
|
||||
public void testRestart() throws Exception {
|
||||
JobSchedulerStore store = new JobSchedulerStoreImpl();
|
||||
File directory = new File("target/test/ScheduledDB");
|
||||
IOHelper.mkdirs(directory);
|
||||
IOHelper.deleteChildren(directory);
|
||||
store.setDirectory(directory);
|
||||
IOHelper.mkdirs(directory);
|
||||
IOHelper.deleteChildren(directory);
|
||||
store.setDirectory(directory);
|
||||
final int NUMBER = 1000;
|
||||
store.start();
|
||||
List<ByteSequence>list = new ArrayList<ByteSequence>();
|
||||
for (int i = 0; i < NUMBER;i++ ) {
|
||||
ByteSequence buff = new ByteSequence(new String("testjob"+i).getBytes());
|
||||
List<ByteSequence> list = new ArrayList<ByteSequence>();
|
||||
for (int i = 0; i < NUMBER; i++) {
|
||||
ByteSequence buff = new ByteSequence(new String("testjob" + i).getBytes());
|
||||
list.add(buff);
|
||||
}
|
||||
|
||||
JobScheduler js = store.getJobScheduler("test");
|
||||
js.startDispatching();
|
||||
int count = 0;
|
||||
long startTime = 10 * 60 * 1000; long period = startTime;
|
||||
for (ByteSequence job:list) {
|
||||
js.schedule("id:"+(count++), job, "", startTime, period, -1);
|
||||
long startTime = 10 * 60 * 1000;
|
||||
long period = startTime;
|
||||
for (ByteSequence job : list) {
|
||||
js.schedule("id:" + (count++), job, "", startTime, period, -1);
|
||||
}
|
||||
List<Job>test = js.getAllJobs();
|
||||
assertEquals(list.size(),test.size());
|
||||
|
||||
List<Job> test = js.getAllJobs();
|
||||
LOG.debug("Found {} jobs in the store before restart", test.size());
|
||||
assertEquals(list.size(), test.size());
|
||||
store.stop();
|
||||
|
||||
store.start();
|
||||
js = store.getJobScheduler("test");
|
||||
test = js.getAllJobs();
|
||||
assertEquals(list.size(),test.size());
|
||||
for (int i = 0; i < list.size();i++) {
|
||||
LOG.debug("Found {} jobs in the store after restart", test.size());
|
||||
assertEquals(list.size(), test.size());
|
||||
|
||||
for (int i = 0; i < list.size(); i++) {
|
||||
String orig = new String(list.get(i).getData());
|
||||
String payload = new String(test.get(i).getPayload());
|
||||
assertEquals(orig,payload);
|
||||
assertEquals(orig, payload);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -31,8 +31,13 @@ import org.apache.activemq.util.IOHelper;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class JobSchedulerTest {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(JobSchedulerTest.class);
|
||||
|
||||
private JobSchedulerStore store;
|
||||
private JobScheduler scheduler;
|
||||
|
||||
|
@@ -172,6 +177,37 @@ public class JobSchedulerTest {
|
|||
assertEquals(size, COUNT);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetExecutionCount() throws Exception {
|
||||
final String jobId = "Job-1";
|
||||
long time = 10000;
|
||||
final CountDownLatch done = new CountDownLatch(10);
|
||||
|
||||
String str = new String("test");
|
||||
scheduler.schedule(jobId, new ByteSequence(str.getBytes()), "", time, 1000, 10);
|
||||
|
||||
int size = scheduler.getAllJobs().size();
|
||||
assertEquals(size, 1);
|
||||
|
||||
scheduler.addListener(new JobListener() {
|
||||
@Override
|
||||
public void scheduledJob(String id, ByteSequence job) {
|
||||
LOG.info("Job exectued: {}", 11 - done.getCount());
|
||||
done.countDown();
|
||||
}
|
||||
});
|
||||
|
||||
List<Job> jobs = scheduler.getNextScheduleJobs();
|
||||
assertEquals(1, jobs.size());
|
||||
Job job = jobs.get(0);
|
||||
assertEquals(jobId, job.getJobId());
|
||||
assertEquals(0, job.getExecutionCount());
|
||||
assertTrue("Should have fired ten times.", done.await(60, TimeUnit.SECONDS));
|
||||
// The job is not updated on the last firing as it is removed from the store following
|
||||
// its last execution, so the count will always be one less than the max firings.
|
||||
assertTrue(job.getExecutionCount() >= 9);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testgetAllJobs() throws Exception {
|
||||
final int COUNT = 10;
|
||||
|
|
|
@@ -0,0 +1,112 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import java.io.File;
|
||||
|
||||
import javax.jms.Connection;
|
||||
import javax.jms.ConnectionFactory;
|
||||
import javax.jms.Queue;
|
||||
import javax.management.ObjectName;
|
||||
|
||||
import org.apache.activemq.ActiveMQConnectionFactory;
|
||||
import org.apache.activemq.broker.BrokerService;
|
||||
import org.apache.activemq.broker.jmx.JobSchedulerViewMBean;
|
||||
import org.apache.activemq.command.ActiveMQQueue;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.rules.TestName;
|
||||
|
||||
/**
|
||||
* Base class for tests of the Broker's JobSchedulerStore.
|
||||
*/
|
||||
public class JobSchedulerTestSupport {
|
||||
|
||||
@Rule public TestName name = new TestName();
|
||||
|
||||
protected String connectionUri;
|
||||
protected BrokerService broker;
|
||||
protected JobScheduler jobScheduler;
|
||||
protected Queue destination;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
connectionUri = "vm://localhost";
|
||||
destination = new ActiveMQQueue(name.getMethodName());
|
||||
|
||||
broker = createBroker();
|
||||
broker.start();
|
||||
broker.waitUntilStarted();
|
||||
|
||||
jobScheduler = broker.getJobSchedulerStore().getJobScheduler("JMS");
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (broker != null) {
|
||||
broker.stop();
|
||||
broker.waitUntilStopped();
|
||||
}
|
||||
}
|
||||
|
||||
protected Connection createConnection() throws Exception {
|
||||
return createConnectionFactory().createConnection();
|
||||
}
|
||||
|
||||
protected ConnectionFactory createConnectionFactory() throws Exception {
|
||||
return new ActiveMQConnectionFactory(connectionUri);
|
||||
}
|
||||
|
||||
protected BrokerService createBroker() throws Exception {
|
||||
return createBroker(true);
|
||||
}
|
||||
|
||||
protected boolean isUseJmx() {
|
||||
return false;
|
||||
}
|
||||
|
||||
protected JobSchedulerViewMBean getJobSchedulerMBean() throws Exception {
|
||||
ObjectName objectName = broker.getAdminView().getJMSJobScheduler();
|
||||
JobSchedulerViewMBean scheduler = null;
|
||||
if (objectName != null) {
|
||||
scheduler = (JobSchedulerViewMBean) broker.getManagementContext()
|
||||
.newProxyInstance(objectName, JobSchedulerViewMBean.class, true);
|
||||
}
|
||||
|
||||
return scheduler;
|
||||
}
|
||||
|
||||
protected BrokerService createBroker(boolean delete) throws Exception {
|
||||
File schedulerDirectory = new File("target/scheduler");
|
||||
if (delete) {
|
||||
IOHelper.mkdirs(schedulerDirectory);
|
||||
IOHelper.deleteChildren(schedulerDirectory);
|
||||
}
|
||||
|
||||
BrokerService answer = new BrokerService();
|
||||
answer.setPersistent(true);
|
||||
answer.setDeleteAllMessagesOnStartup(true);
|
||||
answer.setDataDirectory("target");
|
||||
answer.setSchedulerDirectoryFile(schedulerDirectory);
|
||||
answer.setSchedulerSupport(true);
|
||||
answer.setUseJmx(isUseJmx());
|
||||
return answer;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,179 @@
|
|||
package org.apache.activemq.broker.scheduler;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.security.ProtectionDomain;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.jms.Connection;
|
||||
import javax.jms.MessageProducer;
|
||||
import javax.jms.Queue;
|
||||
import javax.jms.Session;
|
||||
import javax.jms.TextMessage;
|
||||
|
||||
import org.apache.activemq.ActiveMQConnectionFactory;
|
||||
import org.apache.activemq.ScheduledMessage;
|
||||
import org.apache.activemq.broker.BrokerService;
|
||||
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
|
||||
import org.apache.activemq.util.IOHelper;
|
||||
import org.apache.activemq.util.Wait;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class KahaDBSchedulerIndexRebuildTest {
|
||||
|
||||
static final Logger LOG = LoggerFactory.getLogger(KahaDBSchedulerIndexRebuildTest.class);
|
||||
|
||||
private BrokerService broker = null;
|
||||
private final int NUM_JOBS = 50;
|
||||
|
||||
static String basedir;
|
||||
static {
|
||||
try {
|
||||
ProtectionDomain protectionDomain = SchedulerDBVersionTest.class.getProtectionDomain();
|
||||
basedir = new File(new File(protectionDomain.getCodeSource().getLocation().getPath()), "../.").getCanonicalPath();
|
||||
} catch (IOException e) {
|
||||
basedir = ".";
|
||||
}
|
||||
}
|
||||
|
||||
private final File schedulerStoreDir = new File(basedir, "activemq-data/store/scheduler");
|
||||
private final File storeDir = new File(basedir, "activemq-data/store/");
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
LOG.info("Test Dir = {}", schedulerStoreDir);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (broker != null) {
|
||||
broker.stop();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIndexRebuilds() throws Exception {
|
||||
IOHelper.deleteFile(schedulerStoreDir);
|
||||
|
||||
JobSchedulerStoreImpl schedulerStore = createScheduler();
|
||||
broker = createBroker(schedulerStore);
|
||||
broker.start();
|
||||
ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
|
||||
Connection connection = cf.createConnection();
|
||||
connection.start();
|
||||
for (int i = 0; i < NUM_JOBS; ++i) {
|
||||
scheduleRepeating(connection);
|
||||
}
|
||||
connection.close();
|
||||
|
||||
JobScheduler scheduler = schedulerStore.getJobScheduler("JMS");
|
||||
assertNotNull(scheduler);
|
||||
assertEquals(NUM_JOBS, scheduler.getAllJobs().size());
|
||||
|
||||
broker.stop();
|
||||
|
||||
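// Delete only the index file; the journal is kept, forcing a full index rebuild on restart.
|
||||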
IOHelper.delete(new File(schedulerStoreDir, "scheduleDB.data"));
|
||||
|
||||
schedulerStore = createScheduler();
|
||||
broker = createBroker(schedulerStore);
|
||||
broker.start();
|
||||
|
||||
scheduler = schedulerStore.getJobScheduler("JMS");
|
||||
assertNotNull(scheduler);
|
||||
assertEquals(NUM_JOBS, scheduler.getAllJobs().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIndexRebuildsAfterSomeJobsExpire() throws Exception {
|
||||
IOHelper.deleteFile(schedulerStoreDir);
|
||||
|
||||
JobSchedulerStoreImpl schedulerStore = createScheduler();
|
||||
broker = createBroker(schedulerStore);
|
||||
broker.start();
|
||||
ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
|
||||
Connection connection = cf.createConnection();
|
||||
connection.start();
|
||||
for (int i = 0; i < NUM_JOBS; ++i) {
|
||||
scheduleRepeating(connection);
|
||||
scheduleOneShot(connection);
|
||||
}
|
||||
connection.close();
|
||||
|
||||
JobScheduler scheduler = schedulerStore.getJobScheduler("JMS");
|
||||
assertNotNull(scheduler);
|
||||
assertEquals(NUM_JOBS * 2, scheduler.getAllJobs().size());
|
||||
|
||||
final JobScheduler awaitingOneShotTimeout = scheduler;
|
||||
assertTrue("One shot jobs should time out", Wait.waitFor(new Wait.Condition() {
|
||||
|
||||
@Override
|
||||
public boolean isSatisified() throws Exception {
|
||||
return awaitingOneShotTimeout.getAllJobs().size() == NUM_JOBS;
|
||||
}
|
||||
}, TimeUnit.MINUTES.toMillis(2)));
|
||||
|
||||
broker.stop();
|
||||
|
||||
IOHelper.delete(new File(schedulerStoreDir, "scheduleDB.data"));
|
||||
|
||||
schedulerStore = createScheduler();
|
||||
broker = createBroker(schedulerStore);
|
||||
broker.start();
|
||||
|
||||
scheduler = schedulerStore.getJobScheduler("JMS");
|
||||
assertNotNull(scheduler);
|
||||
assertEquals(NUM_JOBS, scheduler.getAllJobs().size());
|
||||
}
|
||||
|
||||
private void scheduleRepeating(Connection connection) throws Exception {
|
||||
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
|
||||
Queue queue = session.createQueue("test.queue");
|
||||
MessageProducer producer = session.createProducer(queue);
|
||||
|
||||
TextMessage message = session.createTextMessage("test msg");
|
||||
long time = 360 * 1000;
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, time);
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_PERIOD, 500);
|
||||
message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, -1);
|
||||
producer.send(message);
|
||||
producer.close();
|
||||
}
|
||||
|
||||
private void scheduleOneShot(Connection connection) throws Exception {
|
||||
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
|
||||
Queue queue = session.createQueue("test.queue");
|
||||
MessageProducer producer = session.createProducer(queue);
|
||||
|
||||
TextMessage message = session.createTextMessage("test msg");
|
||||
long time = TimeUnit.SECONDS.toMillis(30);
|
||||
message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, time);
|
||||
message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, 0);
|
||||
producer.send(message);
|
||||
producer.close();
|
||||
}
|
||||
|
||||
protected JobSchedulerStoreImpl createScheduler() {
|
||||
JobSchedulerStoreImpl scheduler = new JobSchedulerStoreImpl();
|
||||
scheduler.setDirectory(schedulerStoreDir);
|
||||
scheduler.setJournalMaxFileLength(10 * 1024);
|
||||
return scheduler;
|
||||
}
|
||||
|
||||
protected BrokerService createBroker(JobSchedulerStoreImpl scheduler) throws Exception {
|
||||
BrokerService answer = new BrokerService();
|
||||
answer.setJobSchedulerStore(scheduler);
|
||||
answer.setPersistent(true);
|
||||
answer.setDataDirectory(storeDir.getAbsolutePath());
|
||||
answer.setSchedulerSupport(true);
|
||||
answer.setUseJmx(false);
|
||||
return answer;
|
||||
}
|
||||
}
|
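The rebuild scenario this test exercises can also be reproduced outside of JUnit: stop the store, delete the scheduleDB.data index file, and start again. A minimal sketch under the same assumptions as the test above (only APIs already used in this commit; the directory path is hypothetical and the calls may throw Exception):

    // Force an index rebuild: remove only scheduleDB.data, keep the journal files.
    File dir = new File("activemq-data/store/scheduler");  // hypothetical location
    IOHelper.delete(new File(dir, "scheduleDB.data"));

    JobSchedulerStoreImpl store = new JobSchedulerStoreImpl();
    store.setDirectory(dir);
    store.start();  // the index is rebuilt from the journal during start
    System.out.println("Jobs recovered: " + store.getJobScheduler("JMS").getAllJobs().size());
    store.stop();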
@ -0,0 +1,204 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.broker.scheduler;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import javax.jms.Connection;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.jms.TextMessage;

import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.ScheduledMessage;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.store.kahadb.disk.journal.DataFile;
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
import org.apache.activemq.util.IOHelper;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test that the store recovers even if some journal log files are missing.
 */
public class KahaDBSchedulerMissingJournalLogsTest {

    static final Logger LOG = LoggerFactory.getLogger(KahaDBSchedulerMissingJournalLogsTest.class);

    private BrokerService broker = null;
    private JobSchedulerStoreImpl schedulerStore = null;

    private final int NUM_LOGS = 6;
    static String basedir;
    static {
        try {
            ProtectionDomain protectionDomain = SchedulerDBVersionTest.class.getProtectionDomain();
            basedir = new File(new File(protectionDomain.getCodeSource().getLocation().getPath()), "../.").getCanonicalPath();
        } catch (IOException e) {
            basedir = ".";
        }
    }

    private final File schedulerStoreDir = new File(basedir, "activemq-data/store/scheduler");
    private final File storeDir = new File(basedir, "activemq-data/store/");
    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
        IOHelper.deleteFile(schedulerStoreDir);
        LOG.info("Test Dir = {}", schedulerStoreDir);

        createBroker();
        broker.start();
        broker.waitUntilStarted();

        schedulerStore = (JobSchedulerStoreImpl) broker.getJobSchedulerStore();
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
        if (broker != null) {
            broker.stop();
            broker.waitUntilStopped();
        }
    }
    @Test(timeout=120 * 1000)
    public void testMissingLogsCausesBrokerToFail() throws Exception {
        fillUpSomeLogFiles();

        int jobCount = schedulerStore.getJobScheduler("JMS").getAllJobs().size();
        LOG.info("There are {} jobs in the store.", jobCount);

        // Pick the newer half of the journal logs, including the most recent one.
        List<File> toDelete = new ArrayList<File>();

        Map<Integer, DataFile> files = schedulerStore.getJournal().getFileMap();
        for (int i = files.size(); i > files.size() / 2; i--) {
            toDelete.add(files.get(i).getFile());
        }

        broker.stop();
        broker.waitUntilStopped();

        for (File file : toDelete) {
            LOG.info("File to delete: {}", file);
            IOHelper.delete(file);
        }

        try {
            createBroker();
            broker.start();
            fail("Should not start when logs are missing.");
        } catch (Exception e) {
            // Expected: by default the store refuses to load with journal files missing.
        }
    }
    @Test(timeout=120 * 1000)
    public void testRecoverWhenSomeLogsAreMissing() throws Exception {
        fillUpSomeLogFiles();

        int jobCount = schedulerStore.getJobScheduler("JMS").getAllJobs().size();
        LOG.info("There are {} jobs in the store.", jobCount);

        // Pick logs from the middle, keeping the first and the most recent file.
        List<File> toDelete = new ArrayList<File>();

        Map<Integer, DataFile> files = schedulerStore.getJournal().getFileMap();
        for (int i = files.size() - 1; i > files.size() / 2; i--) {
            toDelete.add(files.get(i).getFile());
        }

        broker.stop();
        broker.waitUntilStopped();

        for (File file : toDelete) {
            LOG.info("File to delete: {}", file);
            IOHelper.delete(file);
        }

        schedulerStore = createScheduler();
        schedulerStore.setIgnoreMissingJournalfiles(true);

        createBroker(schedulerStore);
        broker.start();
        broker.waitUntilStarted();

        // Jobs in the deleted logs are lost, but the remainder recover.
        int postRecoverJobCount = schedulerStore.getJobScheduler("JMS").getAllJobs().size();
        assertTrue(postRecoverJobCount > 0);
        assertTrue(postRecoverJobCount < jobCount);
    }
    private void fillUpSomeLogFiles() throws Exception {
        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
        Connection connection = cf.createConnection();
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Queue queue = session.createQueue("test.queue");
        MessageProducer producer = session.createProducer(queue);
        connection.start();
        // Keep scheduling jobs until the journal has rolled into NUM_LOGS files.
        while (true) {
            scheduleRepeating(session, producer);
            if (schedulerStore.getJournal().getFileMap().size() == NUM_LOGS) {
                break;
            }
        }
        connection.close();
    }
    private void scheduleRepeating(Session session, MessageProducer producer) throws Exception {
        TextMessage message = session.createTextMessage("test msg");
        long time = 360 * 1000;
        message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, time);
        message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_PERIOD, 500);
        message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, -1);
        producer.send(message);
    }

    protected JobSchedulerStoreImpl createScheduler() {
        JobSchedulerStoreImpl scheduler = new JobSchedulerStoreImpl();
        scheduler.setDirectory(schedulerStoreDir);
        scheduler.setJournalMaxFileLength(10 * 1024);
        return scheduler;
    }

    protected void createBroker() throws Exception {
        createBroker(createScheduler());
    }

    protected void createBroker(JobSchedulerStoreImpl scheduler) throws Exception {
        broker = new BrokerService();
        broker.setJobSchedulerStore(scheduler);
        broker.setPersistent(true);
        broker.setDataDirectory(storeDir.getAbsolutePath());
        broker.setSchedulerSupport(true);
        broker.setUseJmx(false);
    }
}
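The lenient-recovery switch the second test exercises can be applied the same way in broker setup code. A minimal sketch, assuming (per the test above) that setIgnoreMissingJournalfiles(true) makes the store drop unrecoverable jobs rather than fail the start; the directory path is hypothetical:

    // Start a broker against a scheduler store that may have lost journal files.
    JobSchedulerStoreImpl store = new JobSchedulerStoreImpl();
    store.setDirectory(new File("activemq-data/store/scheduler"));  // hypothetical path
    store.setIgnoreMissingJournalfiles(true);  // tolerate missing journal files

    BrokerService broker = new BrokerService();
    broker.setJobSchedulerStore(store);
    broker.setPersistent(true);
    broker.setSchedulerSupport(true);
    broker.start();
    broker.waitUntilStarted();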
@ -0,0 +1,164 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.activemq.broker.scheduler;

import static org.junit.Assert.assertEquals;

import java.io.File;
import java.io.IOException;
import java.security.ProtectionDomain;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import javax.jms.Connection;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.MessageListener;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.jms.TextMessage;

import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.ScheduledMessage;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.store.kahadb.scheduler.JobSchedulerStoreImpl;
import org.apache.activemq.util.IOHelper;
import org.junit.After;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SchedulerDBVersionTest {
    static String basedir;
    static {
        try {
            ProtectionDomain protectionDomain = SchedulerDBVersionTest.class.getProtectionDomain();
            basedir = new File(new File(protectionDomain.getCodeSource().getLocation().getPath()), "../..").getCanonicalPath();
        } catch (IOException e) {
            basedir = ".";
        }
    }

    static final Logger LOG = LoggerFactory.getLogger(SchedulerDBVersionTest.class);
    final static File VERSION_LEGACY_JMS =
        new File(basedir + "/src/test/resources/org/apache/activemq/store/schedulerDB/legacy");

    BrokerService broker = null;
    protected BrokerService createBroker(JobSchedulerStoreImpl scheduler) throws Exception {
        BrokerService answer = new BrokerService();
        answer.setJobSchedulerStore(scheduler);
        answer.setPersistent(true);
        answer.setDataDirectory("target");
        answer.setSchedulerSupport(true);
        answer.setUseJmx(false);
        return answer;
    }

    @After
    public void tearDown() throws Exception {
        if (broker != null) {
            broker.stop();
        }
    }
    @Ignore("Used only when a new version of the store needs to archive its test data.")
    @Test
    public void testCreateStore() throws Exception {
        JobSchedulerStoreImpl scheduler = new JobSchedulerStoreImpl();
        File dir = new File("src/test/resources/org/apache/activemq/store/schedulerDB/legacy");
        IOHelper.deleteFile(dir);
        scheduler.setDirectory(dir);
        scheduler.setJournalMaxFileLength(1024 * 1024);
        broker = createBroker(scheduler);
        broker.start();
        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
        Connection connection = cf.createConnection();
        connection.start();
        scheduleRepeating(connection);
        connection.close();
        broker.stop();
    }
    private void scheduleRepeating(Connection connection) throws Exception {
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        Queue queue = session.createQueue("test.queue");
        MessageProducer producer = session.createProducer(queue);

        TextMessage message = session.createTextMessage("test msg");
        long time = 1000;
        message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, time);
        message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_PERIOD, 500);
        message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, -1);
        producer.send(message);
        producer.close();
    }

    @Test
    public void testLegacyStoreConversion() throws Exception {
        doTestScheduleRepeated(VERSION_LEGACY_JMS);
    }
    public void doTestScheduleRepeated(File existingStore) throws Exception {
        File testDir = new File("target/activemq-data/store/scheduler/versionDB");
        IOHelper.deleteFile(testDir);
        IOHelper.copyFile(existingStore, testDir);

        final int NUMBER = 10;
        ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");

        // Restart the broker several times against the same converted store to
        // verify the legacy data survives repeated open/close cycles.
        for (int i = 0; i < 3; ++i) {
            JobSchedulerStoreImpl scheduler = new JobSchedulerStoreImpl();
            scheduler.setDirectory(testDir);
            scheduler.setJournalMaxFileLength(1024 * 1024);
            BrokerService broker = createBroker(scheduler);
            broker.start();
            broker.waitUntilStarted();

            final AtomicInteger count = new AtomicInteger();
            Connection connection = cf.createConnection();

            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
            Queue queue = session.createQueue("test.queue");

            MessageConsumer consumer = session.createConsumer(queue);

            final CountDownLatch latch = new CountDownLatch(NUMBER);
            consumer.setMessageListener(new MessageListener() {
                @Override
                public void onMessage(Message message) {
                    LOG.info("Received scheduled message: {}", message);
                    latch.countDown();
                    count.incrementAndGet();
                }
            });

            connection.start();
            assertEquals(NUMBER, latch.getCount());
            latch.await(30, TimeUnit.SECONDS);

            connection.close();
            broker.stop();
            broker.waitUntilStopped();

            assertEquals(0, latch.getCount());
        }
    }
}
@ -21,6 +21,7 @@
log4j.rootLogger=INFO, out, stdout

#log4j.logger.org.apache.activemq.broker.scheduler=DEBUG
#log4j.logger.org.apache.activemq.store.kahadb.scheduler=DEBUG
#log4j.logger.org.apache.activemq.network.DemandForwardingBridgeSupport=DEBUG
#log4j.logger.org.apache.activemq.transport.failover=TRACE
#log4j.logger.org.apache.activemq.store.jdbc=TRACE
Binary file not shown.
Binary file not shown.
Binary file not shown.