Removed deprecated modules. Converted many AMQ store tests to LevelDB tests.

git-svn-id: https://svn.apache.org/repos/asf/activemq/trunk@1445633 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Hiram R. Chirino 2013-02-13 14:47:40 +00:00
parent dc86b87236
commit b2fca26213
245 changed files with 312 additions and 30055 deletions

View File

@ -59,10 +59,6 @@
<artifactId>activemq-http</artifactId>
</dependency>
<!-- Stores -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activemq-amq-store</artifactId>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activemq-kahadb-store</artifactId>

View File

@ -1,93 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-parent</artifactId>
<version>5.9-SNAPSHOT</version>
</parent>
<artifactId>activemq-amq-store</artifactId>
<packaging>jar</packaging>
<name>ActiveMQ :: AMQ Store</name>
<description>The ActiveMQ AMQ Store Implementation</description>
<properties>
</properties>
<dependencies>
<!-- =============================== -->
<!-- Required Dependencies -->
<!-- =============================== -->
<dependency>
<groupId>org.apache.activemq</groupId>
<artifactId>activemq-broker</artifactId>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activeio-core</artifactId>
</dependency>
<!-- =============================== -->
<!-- Optional Dependencies -->
<!-- =============================== -->
<!-- needed for the amq-store tools -->
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>activemq-console</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.velocity</groupId>
<artifactId>velocity</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.servicemix.bundles</groupId>
<artifactId>org.apache.servicemix.bundles.josql</artifactId>
<optional>true</optional>
</dependency>
<!-- =============================== -->
<!-- Testing Dependencies -->
<!-- =============================== -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
</plugins>
</build>
<profiles>
</profiles>
</project>

View File

@ -1,362 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq;
import java.io.File;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import org.apache.activemq.command.ActiveMQBlobMessage;
import org.apache.activemq.command.ActiveMQBytesMessage;
import org.apache.activemq.command.ActiveMQMapMessage;
import org.apache.activemq.command.ActiveMQMessage;
import org.apache.activemq.command.ActiveMQObjectMessage;
import org.apache.activemq.command.ActiveMQStreamMessage;
import org.apache.activemq.command.ActiveMQTextMessage;
import org.apache.activemq.command.DataStructure;
import org.apache.activemq.command.JournalQueueAck;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.JournalTrace;
import org.apache.activemq.command.JournalTransaction;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.kaha.impl.async.ReadOnlyAsyncDataManager;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
import org.apache.velocity.Template;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.Velocity;
import org.apache.velocity.app.VelocityEngine;
import org.josql.Query;
/**
* Allows you to view the contents of a Journal.
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
public class AMQJournalTool {

    // Journal directories to scan; populated from the command-line arguments.
    private final ArrayList<File> dirs = new ArrayList<File>();
    // OpenWire codec used to decode raw journal records into command objects.
    private final WireFormat wireFormat = new OpenWireFormat();
    // Template bodies keyed by formatter name; published to the rendering
    // thread through CustomResourceLoader's thread-local in display().
    private final HashMap<String, String> resources = new HashMap<String, String>();

    // Default Velocity output templates, one per record category. Placeholders
    // such as ${record.destination} are resolved against the objects that
    // display() puts into the context.
    // NOTE(review): "subscritionName" presumably matches a misspelled property
    // on JournalTopicAck — confirm before "correcting" the spelling here.
    private String messageFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.messageId}|${record.properties}|${body}";
    private String topicAckFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.clientId}|${record.subscritionName}|${record.messageId}";
    private String queueAckFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.messageAck.lastMessageId}";
    private String transactionFormat = "${location.dataFileId},${location.offset}|${type}|${record.transactionId}";
    private String traceFormat = "${location.dataFileId},${location.offset}|${type}|${record.message}";
    private String unknownFormat = "${location.dataFileId},${location.offset}|${type}|${record.class.name}";

    // Optional JoSQL WHERE clause used to filter which entries are printed.
    private String where;
    private VelocityContext context;
    private VelocityEngine velocity;
    // When true, execute() prints the usage text and exits.
    private boolean help;

    /**
     * Command-line entry point: applies option-style arguments to a new tool
     * instance, treats the remaining arguments as journal directories, and
     * runs the scan.
     */
    public static void main(String[] args) throws Exception {
        AMQJournalTool consumerTool = new AMQJournalTool();
        String[] directories = CommandLineSupport
                .setOptions(consumerTool, args);
        if (directories.length < 1) {
            System.out
                    .println("Please specify the directories with journal data to scan");
            return;
        }
        for (int i = 0; i < directories.length; i++) {
            consumerTool.getDirs().add(new File(directories[i]));
        }
        consumerTool.execute();
    }

    /**
     * Creates a new VelocityContext that is pre-populated with the JVMs
     * system properties.
     *
     * @return - the VelocityContext that got created.
     */
    protected VelocityContext createVelocityContext() {
        VelocityContext ctx = new VelocityContext();
        List keys = Arrays.asList(ctx.getKeys());
        // Copy every system property into the context unless the context
        // already defines a value under the same key.
        for (Iterator iterator = System.getProperties().entrySet()
                .iterator(); iterator.hasNext();) {
            Map.Entry kv = (Map.Entry) iterator.next();
            String name = (String) kv.getKey();
            String value = (String) kv.getValue();
            if (!keys.contains(name)) {
                ctx.put(name, value);
            }
        }
        return ctx;
    }

    /**
     * Validates the configured directories, initialises Velocity and the
     * optional JoSQL filter, then walks every journal record and renders it.
     *
     * @throws Exception on journal read, query-parse or template errors
     */
    public void execute() throws Exception {
        if( help ) {
            showHelp();
            return;
        }
        if (getDirs().size() < 1) {
            System.out.println("");
            System.out.println("Invalid Usage: Please specify the directories with journal data to scan");
            System.out.println("");
            showHelp();
            return;
        }
        for (File dir : getDirs()) {
            if( !dir.exists() ) {
                System.out.println("");
                System.out.println("Invalid Usage: the directory '"+dir.getPath()+"' does not exist");
                System.out.println("");
                showHelp();
                return;
            }
            if( !dir.isDirectory() ) {
                System.out.println("");
                System.out.println("Invalid Usage: the argument '"+dir.getPath()+"' is not a directory");
                System.out.println("");
                showHelp();
                return;
            }
        }
        context = createVelocityContext();
        velocity = new VelocityEngine();
        // Route all template lookups through CustomResourceLoader so that the
        // in-memory format strings registered below are resolvable by name.
        velocity.setProperty(Velocity.RESOURCE_LOADER, "all");
        velocity.setProperty("all.resource.loader.class", CustomResourceLoader.class.getName());
        velocity.init();
        resources.put("message", messageFormat);
        resources.put("topicAck", topicAckFormat);
        resources.put("queueAck", queueAckFormat);
        resources.put("transaction", transactionFormat);
        resources.put("trace", traceFormat);
        resources.put("unknown", unknownFormat);
        Query query = null;
        if (where != null) {
            // Entries are filtered by running the JoSQL query against each one.
            query = new Query();
            query.parse("select * from "+Entry.class.getName()+" where "+where);
        }
        ReadOnlyAsyncDataManager manager = new ReadOnlyAsyncDataManager(getDirs());
        manager.start();
        try {
            // Walk the journal from the first location, decoding and
            // rendering one record at a time.
            Location curr = manager.getFirstLocation();
            while (curr != null) {
                ByteSequence data = manager.read(curr);
                DataStructure c = (DataStructure) wireFormat.unmarshal(data);
                Entry entry = new Entry();
                entry.setLocation(curr);
                entry.setRecord(c);
                entry.setData(data);
                entry.setQuery(query);
                process(entry);
                curr = manager.getNextLocation(curr);
            }
        } finally {
            manager.close();
        }
    }

    // Prints the usage text bundled as a classpath resource next to this class.
    private void showHelp() {
        InputStream is = AMQJournalTool.class.getResourceAsStream("help.txt");
        Scanner scanner = new Scanner(is);
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            System.out.println(line);
        }
        scanner.close(); }

    // Tags the entry with a human-readable type label and the name of the
    // template that should render it, then displays it.
    private void process(Entry entry) throws Exception {
        DataStructure record = entry.getRecord();
        switch (record.getDataStructureType()) {
        case ActiveMQMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQBytesMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQBytesMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQBlobMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQBlobMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQMapMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQMapMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQObjectMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQObjectMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQStreamMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQStreamMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQTextMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQTextMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case JournalQueueAck.DATA_STRUCTURE_TYPE:
            entry.setType("Queue Ack");
            entry.setFormater("queueAck");
            display(entry);
            break;
        case JournalTopicAck.DATA_STRUCTURE_TYPE:
            entry.setType("Topic Ack");
            entry.setFormater("topicAck");
            display(entry);
            break;
        case JournalTransaction.DATA_STRUCTURE_TYPE:
            entry.setType(getType((JournalTransaction) record));
            entry.setFormater("transaction");
            display(entry);
            break;
        case JournalTrace.DATA_STRUCTURE_TYPE:
            entry.setType("Trace");
            entry.setFormater("trace");
            display(entry);
            break;
        default:
            // Records this tool does not recognise are still printed, using
            // the "unknown" template (shows the record's class name).
            entry.setType("Unknown");
            entry.setFormater("unknown");
            display(entry);
            break;
        }
    }

    // Maps a journal transaction record to a display label.
    private String getType(JournalTransaction record) {
        switch (record.getType()) {
        case JournalTransaction.XA_PREPARE:
            return "XA Prepare";
        case JournalTransaction.XA_COMMIT:
            return "XA Commit";
        case JournalTransaction.XA_ROLLBACK:
            return "XA Rollback";
        case JournalTransaction.LOCAL_COMMIT:
            return "Commit";
        case JournalTransaction.LOCAL_ROLLBACK:
            return "Rollback";
        }
        return "Unknown Transaction";
    }

    // Renders one entry: applies the optional JoSQL filter, then merges the
    // entry's template with a context holding the location, record, type and
    // (for messages) a lazily-evaluated body formatter.
    private void display(Entry entry) throws Exception {
        if (entry.getQuery() != null) {
            List list = Collections.singletonList(entry);
            List results = entry.getQuery().execute(list).getResults();
            if (results.isEmpty()) {
                // Filtered out by the WHERE clause.
                return;
            }
        }
        // Publish the format strings to this thread for the resource loader;
        // always cleared in the finally block below.
        CustomResourceLoader.setResources(resources);
        try {
            context.put("location", entry.getLocation());
            context.put("record", entry.getRecord());
            context.put("type", entry.getType());
            if (entry.getRecord() instanceof ActiveMQMessage) {
                context.put("body", new MessageBodyFormatter(
                        (ActiveMQMessage) entry.getRecord()));
            }
            Template template = velocity.getTemplate(entry.getFormater());
            PrintWriter writer = new PrintWriter(System.out);
            template.merge(context, writer);
            writer.println();
            writer.flush();
        } finally {
            CustomResourceLoader.setResources(null);
        }
    }

    // ---- option setters, invoked reflectively by CommandLineSupport ----

    public void setMessageFormat(String messageFormat) {
        this.messageFormat = messageFormat;
    }

    public void setTopicAckFormat(String ackFormat) {
        this.topicAckFormat = ackFormat;
    }

    public void setTransactionFormat(String transactionFormat) {
        this.transactionFormat = transactionFormat;
    }

    public void setTraceFormat(String traceFormat) {
        this.traceFormat = traceFormat;
    }

    public void setUnknownFormat(String unknownFormat) {
        this.unknownFormat = unknownFormat;
    }

    public void setQueueAckFormat(String queueAckFormat) {
        this.queueAckFormat = queueAckFormat;
    }

    public String getQuery() {
        return where;
    }

    // Sets the JoSQL WHERE clause used to filter displayed entries.
    public void setWhere(String query) {
        this.where = query;
    }

    public boolean isHelp() {
        return help;
    }

    public void setHelp(boolean help) {
        this.help = help;
    }

    /**
     * @return the dirs
     */
    public ArrayList<File> getDirs() {
        return dirs;
    }
}

View File

@ -1,54 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq;
import java.io.File;
import java.util.List;
import org.apache.activemq.console.CommandContext;
import org.apache.activemq.console.command.Command;
/**
 * Console command ("journal-audit") that views the records stored in an AMQ
 * persistence journal by delegating to {@link AMQJournalTool}.
 */
public class AMQJournalToolCommand implements Command {

    private CommandContext context;

    @Override
    public String getName() {
        return "journal-audit";
    }

    @Override
    public String getOneLineDescription() {
        return "Allows you to view records stored in the persistent journal.";
    }

    /**
     * Applies option-style tokens to a new {@link AMQJournalTool}; any
     * remaining tokens are treated as journal directories to scan.
     *
     * @param tokens raw command-line tokens
     * @throws Exception if the journal cannot be read
     */
    @Override
    public void execute(List<String> tokens) throws Exception {
        AMQJournalTool consumerTool = new AMQJournalTool();
        String[] args = tokens.toArray(new String[tokens.size()]);
        String[] directories = CommandLineSupport.setOptions(consumerTool, args);
        for (String directory : directories) {
            consumerTool.getDirs().add(new File(directory));
        }
        consumerTool.execute();
    }

    @Override
    public void setCommandContext(CommandContext context) {
        this.context = context;
    }
}

View File

@ -1,101 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.HashMap;
import org.apache.commons.collections.ExtendedProperties;
import org.apache.velocity.exception.ResourceNotFoundException;
import org.apache.velocity.runtime.RuntimeServices;
import org.apache.velocity.runtime.resource.Resource;
import org.apache.velocity.runtime.resource.loader.FileResourceLoader;
import org.apache.velocity.runtime.resource.loader.ResourceLoader;
public class CustomResourceLoader extends ResourceLoader {

    // Per-thread template map (name -> template body). Callers (see
    // AMQJournalTool.display) install the map before rendering and clear it
    // afterwards, so concurrent renders do not see each other's templates.
    private final static ThreadLocal<HashMap<String, String>> resourcesTL = new ThreadLocal<HashMap<String, String>>();

    // Fallback loader used when a name is not present in the thread-local map.
    private final FileResourceLoader fileResourceLoader = new FileResourceLoader();

    @Override
    public void commonInit(RuntimeServices rs, ExtendedProperties configuration) {
        // Keep the delegate file loader's runtime services in sync with ours.
        super.commonInit(rs, configuration);
        fileResourceLoader.commonInit(rs, configuration);
    }

    public void init( ExtendedProperties configuration)
    {
        fileResourceLoader.init(configuration);
        // AMQ-3665: Turn on template caching as otherwise the journal reader
        // could run out of memory on large journal files
        this.setCachingOn(true);
    }

    /**
     * Resolves a template stream: first from the calling thread's in-memory
     * resource map, then (when absent there) from the file system via the
     * delegate {@link FileResourceLoader}.
     */
    public synchronized InputStream getResourceStream( String name )
        throws ResourceNotFoundException
    {
        InputStream result = null;
        if (name == null || name.length() == 0)
        {
            throw new ResourceNotFoundException ("No template name provided");
        }
        String value = null;
        HashMap<String, String> resources = resourcesTL.get();
        if( resources!=null ) {
            value = resources.get(name);
        }
        if( value == null ) {
            // Not an in-memory template: fall back to the file system.
            result = this.fileResourceLoader.getResourceStream(name);
        } else {
            try
            {
                // NOTE(review): uses the platform default charset — presumably
                // acceptable for these ASCII format strings; confirm.
                result = new ByteArrayInputStream(value.getBytes());
            }
            catch( Exception e )
            {
                throw new ResourceNotFoundException( e.getMessage() );
            }
        }
        return result;
    }

    // In-memory templates never change, so always report "not modified";
    // combined with caching (see init) each template is parsed once.
    public boolean isSourceModified(Resource resource)
    {
        return false;
    }

    public long getLastModified(Resource resource)
    {
        return 0;
    }

    // Returns the template map installed for the calling thread, if any.
    static public HashMap<String, String> getResources() {
        return resourcesTL.get();
    }

    // Installs (or clears, when null) the template map for the calling thread.
    static public void setResources(HashMap<String, String> arg0) {
        resourcesTL.set(arg0);
    }
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq;
import org.apache.activemq.command.DataStructure;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.util.ByteSequence;
import org.josql.Query;
/**
 * One decoded journal record together with its position, display metadata and
 * the optional JoSQL filter applied when it is rendered.
 */
public class Entry {

    // 'location' and 'record' stay package-visible, matching the original
    // declaration, so template property access keeps working unchanged.
    Location location;
    DataStructure record;
    private ByteSequence data;
    private String type;
    private String formater;
    private Query query;

    /** @return the journal position this entry was decoded from */
    public Location getLocation() {
        return location;
    }

    public void setLocation(Location location) {
        this.location = location;
    }

    /** @return the decoded OpenWire command held at this position */
    public DataStructure getRecord() {
        return record;
    }

    public void setRecord(DataStructure record) {
        this.record = record;
    }

    /** @return the raw bytes the record was decoded from */
    public ByteSequence getData() {
        return data;
    }

    public void setData(ByteSequence data) {
        this.data = data;
    }

    /** @return the human-readable record category, e.g. "Queue Ack" */
    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    /** @return the name of the template used to render this entry */
    public String getFormater() {
        return formater;
    }

    public void setFormater(String formater) {
        this.formater = formater;
    }

    /** @return the JoSQL query filtering this entry's display, or null */
    public Query getQuery() {
        return query;
    }

    public void setQuery(Query query) {
        this.query = query;
    }
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq;
import javax.jms.JMSException;
import org.apache.activemq.command.ActiveMQBlobMessage;
import org.apache.activemq.command.ActiveMQBytesMessage;
import org.apache.activemq.command.ActiveMQMapMessage;
import org.apache.activemq.command.ActiveMQMessage;
import org.apache.activemq.command.ActiveMQObjectMessage;
import org.apache.activemq.command.ActiveMQStreamMessage;
import org.apache.activemq.command.ActiveMQTextMessage;
import org.apache.activemq.util.ByteSequence;
/**
 * Lazily renders a message body as display text; placed into the Velocity
 * context as ${body} so the payload is only materialised when a template
 * actually references it.
 */
public class MessageBodyFormatter {

    final ActiveMQMessage message;

    public MessageBodyFormatter(ActiveMQMessage message) {
        this.message = message;
    }

    @Override
    public String toString() {
        try {
            byte kind = message.getDataStructureType();
            if (kind == ActiveMQMessage.DATA_STRUCTURE_TYPE) {
                // Plain message: no body to show.
                return "";
            } else if (kind == ActiveMQBlobMessage.DATA_STRUCTURE_TYPE) {
                return ((ActiveMQBlobMessage) message).getRemoteBlobUrl();
            } else if (kind == ActiveMQMapMessage.DATA_STRUCTURE_TYPE) {
                return ((ActiveMQMapMessage) message).getContentMap().toString();
            } else if (kind == ActiveMQTextMessage.DATA_STRUCTURE_TYPE) {
                return ((ActiveMQTextMessage) message).getText();
            } else if (kind == ActiveMQBytesMessage.DATA_STRUCTURE_TYPE
                    || kind == ActiveMQObjectMessage.DATA_STRUCTURE_TYPE
                    || kind == ActiveMQStreamMessage.DATA_STRUCTURE_TYPE) {
                // Opaque payloads: describe rather than dump the bytes.
                ByteSequence data = message.getContent();
                return "binary payload {length="+data.getLength()+", compressed="+message.isCompressed()+"}";
            }
        } catch (JMSException ignored) {
            // Best-effort formatting: fall through to the empty default.
        }
        return "";
    }
}

View File

@ -1,85 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq.reader;
import java.util.Iterator;
import javax.jms.Message;
import org.apache.activemq.broker.region.MessageReference;
import org.apache.activemq.filter.BooleanExpression;
import org.apache.activemq.filter.MessageEvaluationContext;
import org.apache.activemq.selector.SelectorParser;
/**
* An Iterator for the AMQReader
*
*/
class AMQIterator implements Iterator<Message>{

    private AMQReader reader;
    // Compiled JMS selector; null means every message matches.
    private BooleanExpression expression;
    // Position of the message most recently returned by next().
    // NOTE(review): progress appears to rely on AMQReader mutating the
    // MessageLocation passed to getNextMessage(), which is why repeated calls
    // with the same 'currentLocation' reference still advance — confirm
    // against AMQReader before refactoring.
    private MessageLocation currentLocation;
    // Position found by the most recent hasNext() probe; consumed by next().
    private MessageLocation nextLocation;
    // Cleared once the selector has rejected the remainder of the journal;
    // next() then returns null instead of probing again.
    private boolean valid=true;

    AMQIterator(AMQReader reader, BooleanExpression expression){
        this.reader=reader;
        this.expression=expression;
    }

    /**
     * Probes the reader for the next message, applying the selector (when
     * present) until a match is found or the journal is exhausted.
     */
    public boolean hasNext() {
        try {
            this.nextLocation = reader.getNextMessage(currentLocation);
            Message next = nextLocation != null ? nextLocation.getMessage()
                    : null;
            if (expression == null) {
                return next != null;
            } else {
                // Skip forward past messages the selector rejects.
                while (next != null) {
                    MessageEvaluationContext context = new MessageEvaluationContext();
                    context.setMessageReference((MessageReference) next);
                    if (expression.matches(context)) {
                        return true;
                    }
                    this.nextLocation = reader.getNextMessage(currentLocation);
                    next = nextLocation != null ? nextLocation.getMessage()
                            : null;
                }
                // No remaining message satisfies the selector.
                valid=false;
                return false;
            }
        } catch (Exception e) {
            throw new RuntimeException(
                    "Failed to get next message from reader ", e);
        }
    }

    /**
     * Returns the next matching message, or null when exhausted — note this
     * deviates from the Iterator contract (NoSuchElementException).
     * NOTE(review): nextLocation is not cleared after a return, so two
     * consecutive next() calls without an intervening hasNext() appear to
     * yield the same message — confirm whether callers always pair the calls.
     */
    public Message next() {
        if (valid && (nextLocation != null || hasNext())) {
            this.currentLocation=nextLocation;
            return nextLocation.getMessage();
        }
        return null;
    }

    // Journal iteration is read-only.
    public void remove() {
        throw new IllegalStateException("Not supported");
    }
}

View File

@ -1,172 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq.reader;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import javax.jms.InvalidSelectorException;
import javax.jms.Message;
import org.apache.activemq.command.DataStructure;
import org.apache.activemq.filter.BooleanExpression;
import org.apache.activemq.kaha.impl.async.AsyncDataManager;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.selector.SelectorParser;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
/**
* Reads and iterates through data log files for the AMQMessage Store
*
*/
/**
 * Reads and iterates through data log files for the AMQMessage Store.
 */
public class AMQReader implements Iterable<Message> {

    private AsyncDataManager dataManager;
    // OpenWire codec used to decode raw journal records.
    private WireFormat wireFormat = new OpenWireFormat();
    // Non-null only when this reader was constructed over a single data file
    // rather than a whole journal directory.
    private File file;
    // Parsed JMS selector; null selects every message.
    private BooleanExpression expression;

    /**
     * List all the data files in a directory
     *
     * @param directory the journal directory to inspect
     * @return the set of data files found (possibly empty)
     * @throws IOException if the directory is invalid or cannot be scanned
     */
    public static Set<File> listDataFiles(File directory) throws IOException {
        Set<File> result = new HashSet<File>();
        if (directory == null || !directory.exists() || !directory.isDirectory()) {
            throw new IOException("Invalid Directory " + directory);
        }
        AsyncDataManager dataManager = new AsyncDataManager();
        dataManager.setDirectory(directory);
        dataManager.start();
        try {
            Set<File> set = dataManager.getFiles();
            if (set != null) {
                result.addAll(set);
            }
        } finally {
            // Release the journal even if getFiles() fails, so the directory
            // is not left locked (the original leaked on this path).
            dataManager.close();
        }
        return result;
    }

    /**
     * Create the AMQReader to read a directory of amq data logs - or an
     * individual data log file
     *
     * @param file the directory - or file
     * @throws IOException on journal access failure
     * @throws InvalidSelectorException never thrown by this overload
     */
    public AMQReader(File file) throws InvalidSelectorException, IOException {
        this(file, null);
    }

    /**
     * Create the AMQReader to read a directory of amq data logs - or an
     * individual data log file
     *
     * @param file the directory - or file
     * @param selector the JMS selector or null to select all
     * @throws IOException on journal access failure
     * @throws InvalidSelectorException if the selector cannot be parsed
     */
    public AMQReader(File file, String selector) throws IOException, InvalidSelectorException {
        String str = selector != null ? selector.trim() : null;
        if (str != null && str.length() > 0) {
            this.expression = SelectorParser.parse(str);
        }
        dataManager = new AsyncDataManager();
        dataManager.setArchiveDataLogs(false);
        if (file.isDirectory()) {
            dataManager.setDirectory(file);
        } else {
            // Single-file mode: point the manager at the parent directory and
            // remember the file so iteration is restricted to it.
            dataManager.setDirectory(file.getParentFile());
            dataManager.setDirectoryArchive(file);
            this.file = file;
        }
        dataManager.start();
    }

    public Iterator<Message> iterator() {
        return new AMQIterator(this, this.expression);
    }

    /**
     * Advances to the first message after {@code lastLocation}, mutating and
     * returning that same holder when one exists, or null at end of journal.
     */
    protected MessageLocation getNextMessage(MessageLocation lastLocation)
            throws IllegalStateException, IOException {
        if (this.file != null) {
            return getInternalNextMessage(this.file, lastLocation);
        }
        return getInternalNextMessage(lastLocation);
    }

    private MessageLocation getInternalNextMessage(MessageLocation lastLocation)
            throws IllegalStateException, IOException {
        return getInternalNextMessage(null, lastLocation);
    }

    // Core scan: walk locations forward from lastLocation (or the journal
    // start when lastLocation is null) until a Message record is decoded.
    private MessageLocation getInternalNextMessage(File file,
            MessageLocation lastLocation) throws IllegalStateException,
            IOException {
        MessageLocation result = lastLocation;
        if (result != null) {
            result.setMessage(null);
        }
        Message message = null;
        Location pos = lastLocation != null ? lastLocation.getLocation() : null;
        while ((pos = getNextLocation(file, pos)) != null) {
            message = getMessage(pos);
            if (message != null) {
                if (result == null) {
                    result = new MessageLocation();
                }
                result.setMessage(message);
                break;
            }
        }
        // The loop exits either with (message != null, pos != null) after the
        // break, or with pos == null and message == null at end of journal.
        if (message == null) {
            // Bug fix: the previous version called result.setLocation(pos)
            // before this check, which (a) threw a NullPointerException when
            // the journal was empty and lastLocation was null, and (b) reset
            // the caller's location to null at end-of-journal, so a repeated
            // probe would rescan from the start instead of staying exhausted.
            return null;
        }
        result.setLocation(pos);
        return result;
    }

    private Location getNextLocation(File file, Location last)
            throws IllegalStateException, IOException {
        if (file != null) {
            // Restrict iteration to the single requested data file.
            return dataManager.getNextLocation(file, last, true);
        }
        return dataManager.getNextLocation(last);
    }

    // Decodes the record at 'location', returning it only when it is a JMS
    // Message; non-message journal records (acks, traces, ...) yield null.
    private Message getMessage(Location location) throws IOException {
        ByteSequence data = dataManager.read(location);
        DataStructure c = (DataStructure) wireFormat.unmarshal(data);
        if (c instanceof Message) {
            return (Message) c;
        }
        return null;
    }
}

View File

@ -1,60 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.console.command.store.amq.reader;
import javax.jms.Message;
import org.apache.activemq.kaha.impl.async.Location;
/**
* A holder for a message
*
*/
/**
 * A holder pairing a journal {@link Location} with the {@link Message}
 * decoded there; mutated in place by the reader as iteration advances.
 */
class MessageLocation {

    private Message message;
    private Location location;

    /** @return the message held at this position, or null once cleared */
    public Message getMessage() {
        return message;
    }

    /** @param message the message to associate with this position */
    public void setMessage(Message message) {
        this.message = message;
    }

    /** @return the journal location */
    public Location getLocation() {
        return location;
    }

    /** @param location the journal position to record */
    public void setLocation(Location location) {
        this.location = location;
    }
}

View File

@ -1,55 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * A {@link Marshaller} for raw byte arrays: each payload is stored as a
 * 4-byte length prefix followed by the bytes themselves.
 */
public class BytesMarshaller implements Marshaller {

    /**
     * Write a byte-array payload as {@code length} followed by the raw bytes.
     *
     * @param object the byte[] to write
     * @param dataOut destination stream
     * @throws IOException on write failure
     */
    public void writePayload(Object object, DataOutput dataOut) throws IOException {
        final byte[] payload = (byte[]) object;
        dataOut.writeInt(payload.length);
        dataOut.write(payload);
    }

    /**
     * Read a length-prefixed byte-array payload.
     *
     * @param dataIn source stream
     * @return the bytes that were previously written
     * @throws IOException on read failure
     */
    public Object readPayload(DataInput dataIn) throws IOException {
        final byte[] payload = new byte[dataIn.readInt()];
        dataIn.readFully(payload);
        return payload;
    }
}

View File

@ -1,57 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
/**
 * Marshals OpenWire commands (e.g. a Message or MessageReference) through a
 * {@link WireFormat}, framing each encoded payload with a length prefix.
 */
public class CommandMarshaller implements Marshaller<Object> {

    private WireFormat wireFormat;

    /** @param wireFormat the wire format used to encode/decode commands */
    public CommandMarshaller(WireFormat wireFormat) {
        this.wireFormat = wireFormat;
    }

    /** Creates a marshaller backed by a default {@link OpenWireFormat}. */
    public CommandMarshaller() {
        this(new OpenWireFormat());
    }

    public void writePayload(Object object, DataOutput dataOut) throws IOException {
        final ByteSequence encoded = wireFormat.marshal(object);
        dataOut.writeInt(encoded.length);
        dataOut.write(encoded.data, encoded.offset, encoded.length);
    }

    public Object readPayload(DataInput dataIn) throws IOException {
        final int length = dataIn.readInt();
        final byte[] buffer = new byte[length];
        dataIn.readFully(buffer);
        return wireFormat.unmarshal(new ByteSequence(buffer));
    }
}

View File

@ -1,81 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
/**
* Used by RootContainers
*
*
*/
public class ContainerId implements Externalizable {
private static final long serialVersionUID = -8883779541021821943L;
private Object key;
private String dataContainerName;
public ContainerId() {
}
public ContainerId(Object key, String dataContainerName) {
this.key = key;
this.dataContainerName = dataContainerName;
}
/**
* @return Returns the dataContainerPrefix.
*/
public String getDataContainerName() {
return dataContainerName;
}
/**
* @return Returns the key.
*/
public Object getKey() {
return key;
}
public int hashCode() {
return key.hashCode() ^ dataContainerName.hashCode();
}
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != ContainerId.class) {
return false;
}
ContainerId other = (ContainerId)obj;
return other.key.equals(this.key) && other.dataContainerName.equals(this.dataContainerName);
}
public void writeExternal(ObjectOutput out) throws IOException {
out.writeUTF(getDataContainerName());
out.writeObject(key);
}
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
dataContainerName = in.readUTF();
key = in.readObject();
}
public String toString() {
return "CID{" + dataContainerName + ":" + key + "}";
}
}

View File

@ -1,26 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
/**
 * JMX MBean view of a store index, exposing its size and whether it is
 * transient (memory only) or persistent.
 *
 */
public interface IndexMBean {
    /** @return the number of entries currently held by the index */
    int getSize();
    /** @return true if the index lives only in memory and is not persisted */
    boolean isTransient();
}

View File

@ -1,186 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.util.List;
import java.util.NoSuchElementException;
/**
 * Represents a container of persistent objects in the store. Acts as a list;
 * values can be retrieved in insertion order.
 *
 *
 */
public interface ListContainer<V> extends List<V> {
    /**
     * The container is created or retrieved in an unloaded state. load
     * populates the container with all the indexes used etc and should be
     * called before any operations on the container
     */
    void load();
    /**
     * unload indexes from the container
     *
     */
    void unload();
    /**
     * @return true if the indexes are loaded
     */
    boolean isLoaded();
    /**
     * For homogeneous containers can set a custom marshaller for loading values.
     * The default uses Object serialization
     *
     * @param marshaller
     */
    void setMarshaller(Marshaller marshaller);
    /**
     * @return the id the ListContainer was created with
     */
    Object getId();
    /**
     * @return the number of values in the container
     */
    int size();
    /**
     * Inserts the given element at the beginning of this list.
     *
     * @param o the element to be inserted at the beginning of this list.
     */
    void addFirst(V o);
    /**
     * Appends the given element to the end of this list. (Identical in function
     * to the <tt>add</tt> method; included only for consistency.)
     *
     * @param o the element to be inserted at the end of this list.
     */
    void addLast(V o);
    /**
     * Removes and returns the first element from this list.
     *
     * @return the first element from this list.
     * @throws NoSuchElementException if this list is empty.
     */
    V removeFirst();
    /**
     * Removes and returns the last element from this list.
     *
     * @return the last element from this list.
     * @throws NoSuchElementException if this list is empty.
     */
    V removeLast();
    /**
     * remove an object from the list without retrieving the old value from the
     * store
     *
     * @param position
     * @return true if successful
     */
    boolean doRemove(int position);
    /**
     * add an Object to the list and get a StoreEntry for its position
     *
     * @param object
     * @return the entry in the Store
     */
    StoreEntry placeLast(V object);
    /**
     * insert an Object into the first position of the list and get a StoreEntry
     * for its position
     *
     * @param object
     * @return the location in the Store
     */
    StoreEntry placeFirst(V object);
    /**
     * Advanced feature - the caller must ensure the object written doesn't
     * overwrite other objects in the container
     *
     * @param entry
     * @param object
     */
    void update(StoreEntry entry, V object);
    /**
     * Retrieve an Object from the Store by its location
     *
     * @param entry
     * @return the Object at that entry
     */
    V get(StoreEntry entry);
    /**
     * Get the StoreEntry for the first item of the list
     *
     * @return the first StoreEntry or null if the list is empty
     */
    StoreEntry getFirst();
    /**
     * Get the StoreEntry for the last item of the list
     *
     * @return the last StoreEntry or null if the list is empty
     */
    StoreEntry getLast();
    /**
     * Get the next StoreEntry from the list
     *
     * @param entry
     * @return the next StoreEntry or null
     */
    StoreEntry getNext(StoreEntry entry);
    /**
     * Get the previous StoreEntry from the list
     *
     * @param entry
     * @return the previous store entry or null
     */
    StoreEntry getPrevious(StoreEntry entry);
    /**
     * remove the Object at the StoreEntry
     *
     * @param entry
     * @return true if successful
     */
    boolean remove(StoreEntry entry);
    /**
     * It's possible that a StoreEntry could become stale; this will return an
     * up-to-date entry for the StoreEntry position
     *
     * @param entry old entry
     * @return a refreshed StoreEntry
     */
    StoreEntry refresh(StoreEntry entry);
}

View File

@ -1,291 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
 * Represents a container of persistent objects in the store. Acts as a map, but
 * values can be retrieved in insertion order.
 *
 *
 */
public interface MapContainer<K, V> extends Map<K, V> {
    /**
     * The container is created or retrieved in an unloaded state. load
     * populates the container with all the indexes used etc and should be
     * called before any operations on the container
     */
    void load();
    /**
     * unload indexes from the container
     *
     */
    void unload();
    /**
     * @return true if the indexes are loaded
     */
    boolean isLoaded();
    /**
     * For homogeneous containers can set a custom marshaller for loading keys.
     * The default uses Object serialization
     *
     * @param keyMarshaller
     */
    void setKeyMarshaller(Marshaller<K> keyMarshaller);
    /**
     * For homogeneous containers can set a custom marshaller for loading values.
     * The default uses Object serialization
     *
     * @param valueMarshaller
     *
     */
    void setValueMarshaller(Marshaller<V> valueMarshaller);
    /**
     * @return the id the MapContainer was created with
     */
    Object getId();
    /**
     * @return the number of values in the container
     */
    int size();
    /**
     * @return true if there are no values stored in the container
     */
    boolean isEmpty();
    /**
     * @param key
     * @return true if the container contains the key
     */
    boolean containsKey(Object key);
    /**
     * Get the value associated with the key
     *
     * @param key
     * @return the value associated with the key from the store
     */
    V get(Object key);
    /**
     * @param o
     * @return true if the MapContainer contains the value o
     */
    boolean containsValue(Object o);
    /**
     * Add all entries in the supplied Map
     *
     * @param map
     */
    void putAll(Map<? extends K, ? extends V> map);
    /**
     * @return a Set of all the keys
     */
    Set<K> keySet();
    /**
     * @return a collection of all the values - the values will be lazily pulled
     *         out of the store if iterated etc.
     */
    Collection<V> values();
    /**
     * @return a Set of all the Map.Entry instances - the values will be lazily
     *         pulled out of the store if iterated etc.
     */
    Set<Map.Entry<K, V>> entrySet();
    /**
     * Add an entry
     *
     * @param key
     * @param value
     * @return the old value for the key
     */
    V put(K key, V value);
    /**
     * remove an entry associated with the key
     *
     * @param key
     * @return the old value associated with the key or null
     */
    V remove(Object key);
    /**
     * empty the container
     */
    void clear();
    /**
     * Add an entry to the Store Map
     *
     * @param key
     * @param value
     * @return the StoreEntry associated with the entry
     */
    StoreEntry place(K key, V value);
    /**
     * Remove an Entry from the Map
     *
     * @param entry
     */
    void remove(StoreEntry entry);
    /**
     * Get the Key object from its location
     *
     * @param keyLocation
     * @return the key for the entry
     */
    K getKey(StoreEntry keyLocation);
    /**
     * Get the value from its location
     *
     * @param valueLocation
     * @return the Object
     */
    V getValue(StoreEntry valueLocation);
    /**
     * Get the StoreEntry for the first value in the Map
     *
     * @return the first StoreEntry or null if the map is empty
     */
    StoreEntry getFirst();
    /**
     * Get the StoreEntry for the last value item of the Map
     *
     * @return the last StoreEntry or null if the list is empty
     */
    StoreEntry getLast();
    /**
     * Get the next StoreEntry value from the map
     *
     * @param entry
     * @return the next StoreEntry or null
     */
    StoreEntry getNext(StoreEntry entry);
    /**
     * Get the previous StoreEntry from the map
     *
     * @param entry
     * @return the previous store entry or null
     */
    StoreEntry getPrevious(StoreEntry entry);
    /**
     * It's possible that a StoreEntry could become stale; this will return an
     * up-to-date entry for the StoreEntry position
     *
     * @param entry old entry
     * @return a refreshed StoreEntry
     */
    StoreEntry refresh(StoreEntry entry);
    /**
     * Get the StoreEntry associated with the key
     *
     * @param key
     * @return the StoreEntry
     */
    StoreEntry getEntry(K key);
    /**
     * Set the index bin size
     * @param size
     */
    void setIndexBinSize(int size);
    /**
     * @return index bin size
     */
    int getIndexBinSize();
    /**
     * Set the index key size
     * @param size
     */
    void setIndexKeySize(int size);
    /**
     * @return the index key size
     */
    int getIndexKeySize();
    /**
     * Set the index page size
     * @param size
     */
    void setIndexPageSize(int size);
    /**
     * @return the index page size
     */
    int getIndexPageSize();
    /**
     * set the maximum bin size
     * @param size
     */
    void setIndexMaxBinSize(int size);
    /**
     * @return the maximum bin size
     */
    int getIndexMaxBinSize();
    /**
     * @return the loadFactor
     */
    public int getIndexLoadFactor();
    /**
     * @param loadFactor the loadFactor to set
     */
    public void setIndexLoadFactor(int loadFactor);
    /**
     * @return the Index MBean
     */
    IndexMBean getIndexMBean();
    /**
     * Clean up all state associated with this container.
     */
    void delete();
}

View File

@ -1,49 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
 * Marshaller for marshalling objects in and out of a RawContainer.
 *
 *
 */
public interface Marshaller<T> {
    /**
     * Write the payload of this entry to the RawContainer
     * @param object the object to serialize
     * @param dataOut the destination stream
     * @throws IOException
     */
    void writePayload(T object, DataOutput dataOut) throws IOException;
    /**
     * Read the entry from the RawContainer
     * @param dataIn the source stream
     * @return unmarshalled object
     * @throws IOException
     */
    T readPayload(DataInput dataIn) throws IOException;
}

View File

@ -1,29 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.kaha.impl.async.Location;
/**
 * A MessageAck paired with the journal {@link Location} it was recorded at.
 */
public final class MessageAckWithLocation extends MessageAck {
    // journal position of the ack record; immutable once constructed
    public final Location location;
    public MessageAckWithLocation(MessageAck ack, Location location) {
        // presumably copies the wrapped ack's fields into this instance —
        // MessageAck.copy implementation is not visible here, confirm
        ack.copy(this);
        this.location = location;
    }
}

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.command.MessageId;
/**
 * A {@link Marshaller} for {@link MessageId} instances, stored in their
 * canonical string form.
 */
public class MessageIdMarshaller implements Marshaller<MessageId> {

    /**
     * Write the message id as a modified-UTF string.
     *
     * @param object the id to write
     * @param dataOut destination stream
     * @throws IOException on write failure
     */
    public void writePayload(MessageId object, DataOutput dataOut) throws IOException {
        final String text = object.toString();
        dataOut.writeUTF(text);
    }

    /**
     * Reconstruct a message id from its string form.
     *
     * @param dataIn source stream
     * @return the unmarshalled MessageId
     * @throws IOException on read failure
     */
    public MessageId readPayload(DataInput dataIn) throws IOException {
        final String text = dataIn.readUTF();
        return new MessageId(text);
    }
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.command.Message;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
/**
 * A {@link Marshaller} for Message commands: encoding is delegated to a
 * {@link WireFormat} and each payload is framed with a length prefix.
 */
public class MessageMarshaller implements Marshaller<Message> {

    private WireFormat wireFormat;

    /**
     * Constructor.
     *
     * @param wireFormat the wire format used to encode/decode messages
     */
    public MessageMarshaller(WireFormat wireFormat) {
        this.wireFormat = wireFormat;
    }

    /**
     * Write the message as {@code length} + wire-format bytes.
     *
     * @param message the message to write
     * @param dataOut destination stream
     * @throws IOException on write failure
     */
    public void writePayload(Message message, DataOutput dataOut) throws IOException {
        final ByteSequence encoded = wireFormat.marshal(message);
        dataOut.writeInt(encoded.length);
        dataOut.write(encoded.data, encoded.offset, encoded.length);
    }

    /**
     * Read a length-prefixed payload and decode it via the wire format.
     *
     * @param dataIn source stream
     * @return the unmarshalled Message
     * @throws IOException on read failure
     */
    public Message readPayload(DataInput dataIn) throws IOException {
        final int length = dataIn.readInt();
        final byte[] buffer = new byte[length];
        dataIn.readFully(buffer);
        return (Message) wireFormat.unmarshal(new ByteSequence(buffer));
    }
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * A {@link Marshaller} that stores arbitrary serializable objects using Java
 * object serialization, framed with a length prefix.
 *
 * NOTE(review): uses native Java deserialization — only safe on trusted
 * store data, never on input from an untrusted source.
 */
public class ObjectMarshaller implements Marshaller {

    /**
     * Serialize the object and write it as {@code length} + bytes.
     *
     * @param object the (Serializable) object to write
     * @param dataOut destination stream
     * @throws IOException on serialization or write failure
     */
    public void writePayload(Object object, DataOutput dataOut) throws IOException {
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        ObjectOutputStream objectOut = new ObjectOutputStream(bytesOut);
        objectOut.writeObject(object);
        // close flushes the serialization stream before the byte snapshot
        objectOut.close();
        byte[] data = bytesOut.toByteArray();
        dataOut.writeInt(data.length);
        dataOut.write(data);
    }

    /**
     * Read a length-prefixed serialized object and deserialize it.
     *
     * @param dataIn source stream
     * @return unmarshalled object
     * @throws IOException on read failure, or wrapping a
     *         ClassNotFoundException raised during deserialization (the
     *         original exception is preserved as the cause)
     */
    public Object readPayload(DataInput dataIn) throws IOException {
        int size = dataIn.readInt();
        byte[] data = new byte[size];
        dataIn.readFully(data);
        ByteArrayInputStream bytesIn = new ByteArrayInputStream(data);
        ObjectInputStream objectIn = new ObjectInputStream(bytesIn);
        try {
            return objectIn.readObject();
        } catch (ClassNotFoundException e) {
            // preserve the full cause chain instead of flattening to a message
            throw new IOException(e);
        }
    }
}

View File

@ -1,63 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
/**
 * Unchecked exception raised by the store where a checked exception cannot
 * be propagated.
 */
public class RuntimeStoreException extends RuntimeException {

    private static final long serialVersionUID = 8807084681372365173L;

    /** Creates an exception with neither message nor cause. */
    public RuntimeStoreException() {
        super();
    }

    /**
     * Creates an exception with a detail message.
     *
     * @param message the detail message
     */
    public RuntimeStoreException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and an underlying cause.
     *
     * @param message the detail message
     * @param cause the underlying cause
     */
    public RuntimeStoreException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates an exception wrapping an underlying cause.
     *
     * @param cause the underlying cause
     */
    public RuntimeStoreException(Throwable cause) {
        super(cause);
    }
}

View File

@ -1,305 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.IOException;
import java.util.Set;
/**
 * A Store holds persistent containers.
 *
 *
 */
public interface Store {
    /**
     * Default container name
     */
    String DEFAULT_CONTAINER_NAME = "kaha";
    /**
     * Byte Marshaller
     */
    Marshaller BYTES_MARSHALLER = new BytesMarshaller();
    /**
     * Object Marshaller
     */
    Marshaller OBJECT_MARSHALLER = new ObjectMarshaller();
    /**
     * String Marshaller
     */
    Marshaller STRING_MARSHALLER = new StringMarshaller();
    /**
     * Command Marshaller
     */
    Marshaller COMMAND_MARSHALLER = new CommandMarshaller();
    /**
     * MessageId marshaller
     */
    Marshaller MESSAGEID_MARSHALLER = new MessageIdMarshaller();
    /**
     * close the store
     *
     * @throws IOException
     */
    void close() throws IOException;
    /**
     * Force all writes to disk
     *
     * @throws IOException
     */
    void force() throws IOException;
    /**
     * empty all the contents of the store
     *
     * @throws IOException
     */
    void clear() throws IOException;
    /**
     * delete the store
     *
     * @return true if the delete was successful
     * @throws IOException
     */
    boolean delete() throws IOException;
    /**
     * Checks if a MapContainer exists in the default container
     *
     * @param id
     * @return true if the MapContainer exists
     * @throws IOException
     */
    boolean doesMapContainerExist(Object id) throws IOException;
    /**
     * Checks if a MapContainer exists in the named container
     *
     * @param id
     * @param containerName
     * @return true if the MapContainer exists
     * @throws IOException
     */
    boolean doesMapContainerExist(Object id, String containerName) throws IOException;
    /**
     * Get a MapContainer with the given id - the MapContainer is created if
     * needed
     *
     * @param id
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    MapContainer getMapContainer(Object id) throws IOException;
    /**
     * Get a MapContainer with the given id - the MapContainer is created if
     * needed
     *
     * @param id
     * @param containerName
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    MapContainer getMapContainer(Object id, String containerName) throws IOException;
    /**
     * Get a MapContainer with the given id - the MapContainer is created if
     * needed
     *
     * @param id
     * @param containerName
     * @param persistentIndex
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    MapContainer getMapContainer(Object id, String containerName, boolean persistentIndex) throws IOException;
    /**
     * delete a container from the default container
     *
     * @param id
     * @throws IOException
     */
    void deleteMapContainer(Object id) throws IOException;
    /**
     * delete a MapContainer from the named container
     *
     * @param id
     * @param containerName
     * @throws IOException
     */
    void deleteMapContainer(Object id, String containerName) throws IOException;
    /**
     * Delete Map container
     *
     * @param id
     * @throws IOException
     */
    void deleteMapContainer(ContainerId id) throws IOException;
    /**
     * Get a Set of all MapContainer Ids
     *
     * @return the set of ids
     * @throws IOException
     */
    Set<ContainerId> getMapContainerIds() throws IOException;
    /**
     * Checks if a ListContainer exists in the default container
     *
     * @param id
     * @return true if the ListContainer exists
     * @throws IOException
     */
    boolean doesListContainerExist(Object id) throws IOException;
    /**
     * Checks if a ListContainer exists in the named container
     *
     * @param id
     * @param containerName
     * @return true if the ListContainer exists
     * @throws IOException
     */
    boolean doesListContainerExist(Object id, String containerName) throws IOException;
    /**
     * Get a ListContainer with the given id and creates it if it doesn't exist
     *
     * @param id
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    ListContainer getListContainer(Object id) throws IOException;
    /**
     * Get a ListContainer with the given id and creates it if it doesn't exist
     *
     * @param id
     * @param containerName
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    ListContainer getListContainer(Object id, String containerName) throws IOException;
    /**
     * Get a ListContainer with the given id and creates it if it doesn't exist
     *
     * @param id
     * @param containerName
     * @param persistentIndex
     * @return container for the associated id or null if it doesn't exist
     * @throws IOException
     */
    ListContainer getListContainer(Object id, String containerName, boolean persistentIndex) throws IOException;
    /**
     * delete a ListContainer from the default container
     *
     * @param id
     * @throws IOException
     */
    void deleteListContainer(Object id) throws IOException;
    /**
     * delete a ListContainer from the named container
     *
     * @param id
     * @param containerName
     * @throws IOException
     */
    void deleteListContainer(Object id, String containerName) throws IOException;
    /**
     * delete a list container
     *
     * @param id
     * @throws IOException
     */
    void deleteListContainer(ContainerId id) throws IOException;
    /**
     * Get a Set of all ListContainer Ids
     *
     * @return the set of ids
     * @throws IOException
     */
    Set<ContainerId> getListContainerIds() throws IOException;
    /**
     * @return the maxDataFileLength
     */
    long getMaxDataFileLength();
    /**
     * @param maxDataFileLength the maxDataFileLength to set
     */
    void setMaxDataFileLength(long maxDataFileLength);
    /**
     * @return true if the store has been initialized
     */
    boolean isInitialized();
    /**
     * @return the amount of disk space the store is occupying
     */
    long size();
    /**
     * @return true if persistent indexes are used by default
     */
    public boolean isPersistentIndex();
    /**
     * Set a persistent index as the default if the parameter is true
     * @param persistentIndex
     */
    public void setPersistentIndex(boolean persistentIndex);
    /**
     * @return the default container name
     */
    public String getDefaultContainerName();
    /**
     * set the default container name
     * @param defaultContainerName
     */
    public void setDefaultContainerName(String defaultContainerName);
    /**
     * An explicit call to initialize - this will also be called
     * implicitly for any other operation on the store.
     * @throws IOException
     */
    public void initialize() throws IOException;
}

View File

@ -1,58 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
/**
* Entry for Store data
*
*
*/
/**
 * Entry for Store data: exposes the index record for a key/value pair,
 * including the data-file locations of both the key and the value.
 */
public interface StoreEntry {

    /** @return location of the key's payload in the data files */
    StoreLocation getKeyDataItem();

    /** @return location of the value's payload in the data files */
    StoreLocation getValueDataItem();

    /**
     * @return offset of the next index item in the chain
     */
    long getNextItem();

    /**
     * @return Returns the keyFile (data-file number holding the key).
     */
    int getKeyFile();

    /**
     * @return Returns the valueFile (data-file number holding the value).
     */
    int getValueFile();

    /**
     * @return Returns the valueOffset within the value's data file.
     */
    long getValueOffset();

    /**
     * @return Returns the offset of this entry in the index file.
     */
    long getOffset();

    /** @return size in bytes of the stored key payload */
    int getKeySize();

    /** @return size in bytes of the stored value payload */
    int getValueSize();
}

View File

@ -1,109 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.activemq.kaha.impl.KahaStore;
/**
* Factory for creating stores
*
*
*/
/**
 * Factory for creating {@link Store} instances backed by {@link KahaStore}.
 * Utility class; not instantiable.
 */
public final class StoreFactory {

    private StoreFactory() {
    }

    /**
     * open or create a Store
     *
     * @param name directory name for the store (made file-system safe by KahaStore)
     * @param mode file access mode, e.g. "rw" (passed through to the store)
     * @return the opened/created store
     * @throws IOException on a disk error
     */
    public static Store open(String name, String mode) throws IOException {
        return new KahaStore(name, mode, new AtomicLong());
    }

    /**
     * Open or create a Store
     *
     * @param directory directory holding the store files
     * @param mode file access mode, e.g. "rw"
     * @return the opened/created store
     * @throws IOException on a disk error
     */
    public static Store open(File directory, String mode) throws IOException {
        return new KahaStore(directory, mode, new AtomicLong());
    }

    /**
     * open or create a Store
     * @param name directory name for the store
     * @param mode file access mode, e.g. "rw"
     * @param size shared counter tracking the store's on-disk size
     * @return the opened/created store
     * @throws IOException on a disk error
     */
    public static Store open(String name, String mode, AtomicLong size) throws IOException {
        return new KahaStore(name, mode, size);
    }

    /**
     * Open or create a Store
     *
     * @param directory directory holding the store files
     * @param mode file access mode, e.g. "rw"
     * @param size shared counter tracking the store's on-disk size
     * @return the opened/created store
     * @throws IOException on a disk error
     */
    public static Store open(File directory, String mode, AtomicLong size) throws IOException {
        return new KahaStore(directory, mode, size);
    }

    /**
     * Delete a database
     *
     * @param name of the database
     * @return true if successful
     * @throws IOException on a disk error
     */
    public static boolean delete(String name) throws IOException {
        KahaStore store = new KahaStore(name, "rw");
        return store.delete();
    }

    /**
     * Delete a database
     *
     * @param directory directory holding the store files
     * @return true if successful
     * @throws IOException on a disk error
     */
    public static boolean delete(File directory) throws IOException {
        KahaStore store = new KahaStore(directory, "rw");
        return store.delete();
    }
}

View File

@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
/**
* Location of a data in the Store
*
*
*/
/**
 * Location of a piece of data in the Store: the data-file number, the byte
 * offset within that file, and the payload size.
 */
public interface StoreLocation {

    /**
     * @return Returns the size of the payload in bytes.
     */
    int getSize();

    /**
     * @return Returns the byte offset within the data file.
     */
    long getOffset();

    /**
     * @return Returns the data-file number.
     */
    int getFile();
}

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* Implementation of a Marshaller for Strings
*
*
*/
public class StringMarshaller implements Marshaller<String> {
/**
* Write the payload of this entry to the RawContainer
*
* @param object
* @param dataOut
* @throws IOException
*/
public void writePayload(String object, DataOutput dataOut) throws IOException {
dataOut.writeUTF(object);
}
/**
* Read the entry from the RawContainer
*
* @param dataIn
* @return unmarshalled object
* @throws IOException
*/
public String readPayload(DataInput dataIn) throws IOException {
return dataIn.readUTF();
}
}

View File

@ -1,55 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl;
import java.io.IOException;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.data.RedoListener;
/**
 * Manages the data files of a store: reading, appending and updating payload
 * records, plus reference counting of files ("interest") so unused files can
 * be reclaimed by {@link #consolidateDataFiles()}.
 */
public interface DataManager {

    /** @return the logical name of this data manager (used as the file prefix) */
    String getName();

    /** Read and unmarshal the payload stored at the given location. */
    Object readItem(Marshaller marshaller, StoreLocation item) throws IOException;

    /** Marshal and append a payload, returning its new location. */
    StoreLocation storeDataItem(Marshaller marshaller, Object payload) throws IOException;

    /** Append a redo (recovery) record, returning its location. */
    StoreLocation storeRedoItem(Object payload) throws IOException;

    /** Overwrite the payload at an existing location in place. */
    void updateItem(StoreLocation location, Marshaller marshaller, Object payload) throws IOException;

    /** Replay all stored redo records through the listener (used on recovery). */
    void recoverRedoItems(RedoListener listener) throws IOException;

    /** Close all underlying data files. */
    void close() throws IOException;

    /** Flush pending writes to disk. */
    void force() throws IOException;

    /** Delete all data files; returns true on success. */
    boolean delete() throws IOException;

    /** Register interest in (a reference to) the given data-file number. */
    void addInterestInFile(int file) throws IOException;

    /** Drop interest in the given data-file number. */
    void removeInterestInFile(int file) throws IOException;

    /** Remove data files that no longer have any registered interest. */
    void consolidateDataFiles() throws IOException;

    /** @return the marshaller used for redo records */
    Marshaller getRedoMarshaller();

    /** Set the marshaller used for redo records. */
    void setRedoMarshaller(Marshaller redoMarshaller);
}

View File

@ -1,135 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.activemq.kaha.ContainerId;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.Store;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.data.Item;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A container of roots for other Containers
*
*
*/
/**
 * A container of roots for other Containers.
 * <p>
 * Persists the set of container roots as an on-disk doubly linked list of
 * {@link IndexItem}s hanging off a single root item, and mirrors it in memory
 * via {@code map} (key -> entry) and {@code list} (insertion order). Each
 * entry's key data lives in the data files; its value offset points at the
 * contained container's own root index item.
 */
class IndexRootContainer {

    protected static final Marshaller ROOT_MARSHALLER = Store.OBJECT_MARSHALLER;
    private static final Logger LOG = LoggerFactory.getLogger(IndexRootContainer.class);

    // on-disk anchor of the linked list of roots
    protected IndexItem root;
    protected IndexManager indexManager;
    protected DataManager dataManager;
    // in-memory mirrors of the on-disk list; must be kept in sync with it
    protected Map<Object, StoreEntry> map = new ConcurrentHashMap<Object, StoreEntry>();
    protected LinkedList<StoreEntry> list = new LinkedList<StoreEntry>();

    /**
     * Loads the existing root chain from disk, rebuilding the in-memory map
     * and list and re-registering interest in each entry's key data file.
     */
    IndexRootContainer(IndexItem root, IndexManager im, DataManager dfm) throws IOException {
        this.root = root;
        this.indexManager = im;
        this.dataManager = dfm;
        long nextItem = root.getNextItem();
        while (nextItem != Item.POSITION_NOT_SET) {
            StoreEntry item = indexManager.getIndex(nextItem);
            StoreLocation data = item.getKeyDataItem();
            Object key = dataManager.readItem(ROOT_MARSHALLER, data);
            map.put(key, item);
            list.add(item);
            nextItem = item.getNextItem();
            dataManager.addInterestInFile(item.getKeyFile());
        }
    }

    /** @return the set of keys for all known roots */
    Set<Object> getKeys() {
        return map.keySet();
    }

    /**
     * Adds (or replaces) a root for the given container id, appending it to
     * the end of the on-disk chain, and returns the new container root item
     * allocated in the container's own index manager.
     */
    IndexItem addRoot(IndexManager containerIndexManager, ContainerId key) throws IOException {
        if (map.containsKey(key)) {
            // replace semantics: drop the old root first
            removeRoot(containerIndexManager, key);
        }
        StoreLocation data = dataManager.storeDataItem(ROOT_MARSHALLER, key);
        IndexItem newRoot = indexManager.createNewIndex();
        newRoot.setKeyData(data);
        IndexItem containerRoot = containerIndexManager.createNewIndex();
        containerIndexManager.storeIndex(containerRoot);
        // the entry's value offset points at the container's own root item
        newRoot.setValueOffset(containerRoot.getOffset());
        IndexItem last = list.isEmpty() ? null : (IndexItem)list.getLast();
        last = last == null ? root : last;
        long prev = last.getOffset();
        newRoot.setPreviousItem(prev);
        indexManager.storeIndex(newRoot);
        // link the previous tail forward to the new entry
        last.setNextItem(newRoot.getOffset());
        indexManager.storeIndex(last);
        map.put(key, newRoot);
        list.add(newRoot);
        return containerRoot;
    }

    /**
     * Removes the root for the given container id, unlinking it from the
     * on-disk chain and freeing both its entry and the container's root item.
     */
    void removeRoot(IndexManager containerIndexManager, ContainerId key) throws IOException {
        StoreEntry oldRoot = map.remove(key);
        if (oldRoot != null) {
            dataManager.removeInterestInFile(oldRoot.getKeyFile());
            // get the container root
            IndexItem containerRoot = containerIndexManager.getIndex(oldRoot.getValueOffset());
            if (containerRoot != null) {
                containerIndexManager.freeIndex(containerRoot);
            }
            // splice the entry out of the doubly linked chain
            int index = list.indexOf(oldRoot);
            IndexItem prev = index > 0 ? (IndexItem)list.get(index - 1) : root;
            prev = prev == null ? root : prev;
            IndexItem next = index < (list.size() - 1) ? (IndexItem)list.get(index + 1) : null;
            if (next != null) {
                prev.setNextItem(next.getOffset());
                next.setPreviousItem(prev.getOffset());
                indexManager.updateIndexes(next);
            } else {
                prev.setNextItem(Item.POSITION_NOT_SET);
            }
            indexManager.updateIndexes(prev);
            list.remove(oldRoot);
            indexManager.freeIndex((IndexItem)oldRoot);
        }
    }

    /**
     * Looks up the container root item for the given id, or null if no root
     * is registered for it.
     */
    IndexItem getRoot(IndexManager containerIndexManager, ContainerId key) throws IOException {
        StoreEntry index = map.get(key);
        if (index != null) {
            return containerIndexManager.getIndex(index.getValueOffset());
        }
        return null;
    }

    /** @return true if a root exists for the given key */
    boolean doesRootExist(Object key) {
        return map.containsKey(key);
    }
}

View File

@ -1,576 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.activemq.kaha.ContainerId;
import org.apache.activemq.kaha.ListContainer;
import org.apache.activemq.kaha.MapContainer;
import org.apache.activemq.kaha.Store;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.async.AsyncDataManager;
import org.apache.activemq.kaha.impl.async.DataManagerFacade;
import org.apache.activemq.kaha.impl.container.ListContainerImpl;
import org.apache.activemq.kaha.impl.container.MapContainerImpl;
import org.apache.activemq.kaha.impl.data.DataManagerImpl;
import org.apache.activemq.kaha.impl.data.Item;
import org.apache.activemq.kaha.impl.data.RedoListener;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.apache.activemq.kaha.impl.index.RedoStoreIndexItem;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Store Implementation
*
*
*/
/**
 * Store Implementation.
 * <p>
 * Lazily-initialized file-based store of named Map and List containers. Each
 * logical container name maps to a {@link DataManager} (payload files) and an
 * {@link IndexManager} (index file); two {@link IndexRootContainer}s persist
 * the catalog of map and list containers. A file lock plus a system-property
 * guard prevents the same directory being opened twice.
 */
public class KahaStore implements Store {

    private static final String PROPERTY_PREFIX = "org.apache.activemq.kaha.Store";
    // escape hatches for platforms where java.nio file locking misbehaves
    private static final boolean BROKEN_FILE_LOCK = "true".equals(System.getProperty(PROPERTY_PREFIX
                                                                                    + ".FileLockBroken",
                                                                                    "false"));
    private static final boolean DISABLE_LOCKING = "true".equals(System.getProperty(PROPERTY_PREFIX
                                                                                    + ".DisableLocking",
                                                                                    "false"));
    //according to the String javadoc, all constant strings are interned so this will be the same object throughout the vm
    //and we can use it as a monitor for the lockset.
    private final static String LOCKSET_MONITOR = PROPERTY_PREFIX + ".Lock.Monitor";
    private static final Logger LOG = LoggerFactory.getLogger(KahaStore.class);

    private final File directory;
    private final String mode;
    private IndexRootContainer mapsContainer;
    private IndexRootContainer listsContainer;
    private final Map<ContainerId, ListContainerImpl> lists = new ConcurrentHashMap<ContainerId, ListContainerImpl>();
    private final Map<ContainerId, MapContainerImpl> maps = new ConcurrentHashMap<ContainerId, MapContainerImpl>();
    private final Map<String, DataManager> dataManagers = new ConcurrentHashMap<String, DataManager>();
    private final Map<String, IndexManager> indexManagers = new ConcurrentHashMap<String, IndexManager>();
    private boolean closed;
    private boolean initialized;
    private boolean logIndexChanges;
    private boolean useAsyncDataManager;
    private long maxDataFileLength = 1024 * 1024 * 32; // 32MB per data file by default
    private FileLock lock;
    private boolean persistentIndex = true;
    private RandomAccessFile lockFile;
    // shared across managers so size() reflects the whole store
    private final AtomicLong storeSize;
    private String defaultContainerName = DEFAULT_CONTAINER_NAME;

    public KahaStore(String name, String mode) throws IOException {
        this(new File(IOHelper.toFileSystemDirectorySafeName(name)), mode, new AtomicLong());
    }

    public KahaStore(File directory, String mode) throws IOException {
        this(directory, mode, new AtomicLong());
    }

    public KahaStore(String name, String mode, AtomicLong storeSize) throws IOException {
        this(new File(IOHelper.toFileSystemDirectorySafeName(name)), mode, storeSize);
    }

    public KahaStore(File directory, String mode, AtomicLong storeSize) throws IOException {
        this.mode = mode;
        this.storeSize = storeSize;
        this.directory = directory;
        IOHelper.mkdirs(this.directory);
    }

    /**
     * Closes all containers and managers and releases the directory lock.
     * Idempotent: subsequent calls are no-ops.
     */
    public synchronized void close() throws IOException {
        if (!closed) {
            closed = true;
            if (initialized) {
                unlock();
                for (ListContainerImpl container : lists.values()) {
                    container.close();
                }
                lists.clear();
                for (MapContainerImpl container : maps.values()) {
                    container.close();
                }
                maps.clear();
                for (Iterator<IndexManager> iter = indexManagers.values().iterator(); iter.hasNext();) {
                    IndexManager im = iter.next();
                    im.close();
                    iter.remove();
                }
                for (Iterator<DataManager> iter = dataManagers.values().iterator(); iter.hasNext();) {
                    DataManager dm = iter.next();
                    dm.close();
                    iter.remove();
                }
            }
            if (lockFile != null) {
                lockFile.close();
                lockFile = null;
            }
        }
    }

    /** Flushes all index and data managers to disk. No-op before initialize(). */
    public synchronized void force() throws IOException {
        if (initialized) {
            for (Iterator<IndexManager> iter = indexManagers.values().iterator(); iter.hasNext();) {
                IndexManager im = iter.next();
                im.force();
            }
            for (Iterator<DataManager> iter = dataManagers.values().iterator(); iter.hasNext();) {
                DataManager dm = iter.next();
                dm.force();
            }
        }
    }

    /** Empties every map and list container in the store (containers remain). */
    public synchronized void clear() throws IOException {
        initialize();
        for (Iterator i = mapsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            MapContainer container = getMapContainer(id.getKey(), id.getDataContainerName());
            container.clear();
        }
        for (Iterator i = listsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            ListContainer container = getListContainer(id.getKey(), id.getDataContainerName());
            container.clear();
        }
    }

    /**
     * Clears and deletes all store files, then removes the directory's
     * children. Returns true only if every deletion succeeded.
     */
    public synchronized boolean delete() throws IOException {
        boolean result = true;
        if (initialized) {
            clear();
            for (Iterator<IndexManager> iter = indexManagers.values().iterator(); iter.hasNext();) {
                IndexManager im = iter.next();
                result &= im.delete();
                iter.remove();
            }
            for (Iterator<DataManager> iter = dataManagers.values().iterator(); iter.hasNext();) {
                DataManager dm = iter.next();
                result &= dm.delete();
                iter.remove();
            }
        }
        if (directory != null && directory.isDirectory()) {
            result = IOHelper.deleteChildren(directory);
            String str = result ? "successfully deleted" : "failed to delete";
            LOG.info("Kaha Store " + str + " data directory " + directory);
        }
        return result;
    }

    public synchronized boolean isInitialized() {
        return initialized;
    }

    public boolean doesMapContainerExist(Object id) throws IOException {
        return doesMapContainerExist(id, defaultContainerName);
    }

    public synchronized boolean doesMapContainerExist(Object id, String containerName) throws IOException {
        initialize();
        ContainerId containerId = new ContainerId(id, containerName);
        // check the live cache first, then the persisted catalog
        return maps.containsKey(containerId) || mapsContainer.doesRootExist(containerId);
    }

    public MapContainer getMapContainer(Object id) throws IOException {
        return getMapContainer(id, defaultContainerName);
    }

    public MapContainer getMapContainer(Object id, String containerName) throws IOException {
        return getMapContainer(id, containerName, persistentIndex);
    }

    /**
     * Gets (or lazily creates) the MapContainer for the given id/name,
     * registering its root in the persisted catalog on first creation.
     */
    public synchronized MapContainer getMapContainer(Object id, String containerName, boolean persistentIndex)
        throws IOException {
        initialize();
        ContainerId containerId = new ContainerId(id, containerName);
        MapContainerImpl result = maps.get(containerId);
        if (result == null) {
            DataManager dm = getDataManager(containerName);
            IndexManager im = getIndexManager(dm, containerName);
            IndexItem root = mapsContainer.getRoot(im, containerId);
            if (root == null) {
                root = mapsContainer.addRoot(im, containerId);
            }
            result = new MapContainerImpl(directory, containerId, root, im, dm, persistentIndex);
            maps.put(containerId, result);
        }
        return result;
    }

    public void deleteMapContainer(Object id) throws IOException {
        deleteMapContainer(id, defaultContainerName);
    }

    public void deleteMapContainer(Object id, String containerName) throws IOException {
        ContainerId containerId = new ContainerId(id, containerName);
        deleteMapContainer(containerId);
    }

    /** Removes a map container: clears its data and drops it from the catalog. */
    public synchronized void deleteMapContainer(ContainerId containerId) throws IOException {
        initialize();
        MapContainerImpl container = maps.remove(containerId);
        if (container != null) {
            container.clear();
            mapsContainer.removeRoot(container.getIndexManager(), containerId);
            container.close();
        }
    }

    public synchronized Set<ContainerId> getMapContainerIds() throws IOException {
        initialize();
        Set<ContainerId> set = new HashSet<ContainerId>();
        for (Iterator i = mapsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            set.add(id);
        }
        return set;
    }

    public boolean doesListContainerExist(Object id) throws IOException {
        return doesListContainerExist(id, defaultContainerName);
    }

    public synchronized boolean doesListContainerExist(Object id, String containerName) throws IOException {
        initialize();
        ContainerId containerId = new ContainerId(id, containerName);
        return lists.containsKey(containerId) || listsContainer.doesRootExist(containerId);
    }

    public ListContainer getListContainer(Object id) throws IOException {
        return getListContainer(id, defaultContainerName);
    }

    public ListContainer getListContainer(Object id, String containerName) throws IOException {
        return getListContainer(id, containerName, persistentIndex);
    }

    /**
     * Gets (or lazily creates) the ListContainer for the given id/name,
     * registering its root in the persisted catalog on first creation.
     */
    public synchronized ListContainer getListContainer(Object id, String containerName,
                                                       boolean persistentIndex) throws IOException {
        initialize();
        ContainerId containerId = new ContainerId(id, containerName);
        ListContainerImpl result = lists.get(containerId);
        if (result == null) {
            DataManager dm = getDataManager(containerName);
            IndexManager im = getIndexManager(dm, containerName);
            IndexItem root = listsContainer.getRoot(im, containerId);
            if (root == null) {
                root = listsContainer.addRoot(im, containerId);
            }
            result = new ListContainerImpl(containerId, root, im, dm, persistentIndex);
            lists.put(containerId, result);
        }
        return result;
    }

    public void deleteListContainer(Object id) throws IOException {
        deleteListContainer(id, defaultContainerName);
    }

    public synchronized void deleteListContainer(Object id, String containerName) throws IOException {
        ContainerId containerId = new ContainerId(id, containerName);
        deleteListContainer(containerId);
    }

    /** Removes a list container: drops it from the catalog and clears its data. */
    public synchronized void deleteListContainer(ContainerId containerId) throws IOException {
        initialize();
        ListContainerImpl container = lists.remove(containerId);
        if (container != null) {
            listsContainer.removeRoot(container.getIndexManager(), containerId);
            container.clear();
            container.close();
        }
    }

    public synchronized Set<ContainerId> getListContainerIds() throws IOException {
        initialize();
        Set<ContainerId> set = new HashSet<ContainerId>();
        for (Iterator i = listsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            set.add(id);
        }
        return set;
    }

    /**
     * @return the listsContainer
     */
    public IndexRootContainer getListsContainer() {
        return this.listsContainer;
    }

    /**
     * @return the mapsContainer
     */
    public IndexRootContainer getMapsContainer() {
        return this.mapsContainer;
    }

    /**
     * Gets (or lazily creates) the DataManager for a container name. Uses the
     * journal-based async manager when useAsyncDataManager is set, otherwise
     * the synchronous implementation; replays redo records if index-change
     * logging is enabled.
     */
    public synchronized DataManager getDataManager(String name) throws IOException {
        DataManager dm = dataManagers.get(name);
        if (dm == null) {
            if (isUseAsyncDataManager()) {
                AsyncDataManager t = new AsyncDataManager(storeSize);
                t.setDirectory(directory);
                t.setFilePrefix("async-data-" + name + "-");
                t.setMaxFileLength((int)maxDataFileLength);
                t.start();
                dm = new DataManagerFacade(t, name);
            } else {
                DataManagerImpl t = new DataManagerImpl(directory, name, storeSize);
                t.setMaxFileLength(maxDataFileLength);
                dm = t;
            }
            if (logIndexChanges) {
                recover(dm);
            }
            dataManagers.put(name, dm);
        }
        return dm;
    }

    /** Gets (or lazily creates) the IndexManager paired with a data manager. */
    public synchronized IndexManager getIndexManager(DataManager dm, String name) throws IOException {
        IndexManager im = indexManagers.get(name);
        if (im == null) {
            im = new IndexManager(directory, name, mode, logIndexChanges ? dm : null, storeSize);
            indexManagers.put(name, im);
        }
        return im;
    }

    // replay logged index changes from the data manager's redo records
    private void recover(final DataManager dm) throws IOException {
        dm.recoverRedoItems(new RedoListener() {
            public void onRedoItem(StoreLocation item, Object o) throws Exception {
                RedoStoreIndexItem redo = (RedoStoreIndexItem)o;
                // IndexManager im = getIndexManager(dm, redo.getIndexName());
                IndexManager im = getIndexManager(dm, dm.getName());
                im.redo(redo);
            }
        });
    }

    public synchronized boolean isLogIndexChanges() {
        return logIndexChanges;
    }

    public synchronized void setLogIndexChanges(boolean logIndexChanges) {
        this.logIndexChanges = logIndexChanges;
    }

    /**
     * @return the maxDataFileLength
     */
    public synchronized long getMaxDataFileLength() {
        return maxDataFileLength;
    }

    /**
     * @param maxDataFileLength the maxDataFileLength to set
     */
    public synchronized void setMaxDataFileLength(long maxDataFileLength) {
        this.maxDataFileLength = maxDataFileLength;
    }

    /**
     * @return the default index type ("PERSISTENT" or "VM")
     */
    public synchronized String getIndexTypeAsString() {
        return persistentIndex ? "PERSISTENT" : "VM";
    }

    /**
     * Set the default index type
     *
     * @param type "PERSISTENT" or "VM" (anything other than "VM" means persistent)
     */
    public synchronized void setIndexTypeAsString(String type) {
        if (type.equalsIgnoreCase("VM")) {
            persistentIndex = false;
        } else {
            persistentIndex = true;
        }
    }

    public boolean isPersistentIndex() {
        return persistentIndex;
    }

    public void setPersistentIndex(boolean persistentIndex) {
        this.persistentIndex = persistentIndex;
    }

    public synchronized boolean isUseAsyncDataManager() {
        return useAsyncDataManager;
    }

    public synchronized void setUseAsyncDataManager(boolean useAsyncWriter) {
        this.useAsyncDataManager = useAsyncWriter;
    }

    /**
     * @return size of store
     * @see org.apache.activemq.kaha.Store#size()
     */
    public long size() {
        return storeSize.get();
    }

    public String getDefaultContainerName() {
        return defaultContainerName;
    }

    public void setDefaultContainerName(String defaultContainerName) {
        this.defaultContainerName = defaultContainerName;
    }

    /**
     * Opens (or creates) the store: acquires the directory lock, loads or
     * seeds the two catalog roots (maps at offset 0, lists at offset
     * INDEX_SIZE), then re-registers data-file interest and consolidates.
     * Called implicitly by every other operation.
     */
    public synchronized void initialize() throws IOException {
        if (closed) {
            throw new IOException("Store has been closed.");
        }
        if (!initialized) {
            LOG.info("Kaha Store using data directory " + directory);
            lockFile = new RandomAccessFile(new File(directory, "lock"), "rw");
            lock();
            DataManager defaultDM = getDataManager(defaultContainerName);
            IndexManager rootIndexManager = getIndexManager(defaultDM, defaultContainerName);
            IndexItem mapRoot = new IndexItem();
            IndexItem listRoot = new IndexItem();
            if (rootIndexManager.isEmpty()) {
                // fresh store: persist the two catalog roots at fixed offsets
                mapRoot.setOffset(0);
                rootIndexManager.storeIndex(mapRoot);
                listRoot.setOffset(IndexItem.INDEX_SIZE);
                rootIndexManager.storeIndex(listRoot);
                rootIndexManager.setLength(IndexItem.INDEX_SIZE * 2);
            } else {
                mapRoot = rootIndexManager.getIndex(0);
                listRoot = rootIndexManager.getIndex(IndexItem.INDEX_SIZE);
            }
            initialized = true;
            mapsContainer = new IndexRootContainer(mapRoot, rootIndexManager, defaultDM);
            listsContainer = new IndexRootContainer(listRoot, rootIndexManager, defaultDM);
            /**
             * Add interest in data files - then consolidate them
             */
            generateInterestInMapDataFiles();
            generateInterestInListDataFiles();
            for (Iterator<DataManager> i = dataManagers.values().iterator(); i.hasNext();) {
                DataManager dm = i.next();
                dm.consolidateDataFiles();
            }
        }
    }

    // Guard against double-open: an OS file lock for other processes plus a
    // system property for other classloaders/instances in this VM.
    private void lock() throws IOException {
        synchronized (LOCKSET_MONITOR) {
            if (!DISABLE_LOCKING && directory != null && lock == null) {
                String key = getPropertyKey();
                String property = System.getProperty(key);
                if (null == property) {
                    if (!BROKEN_FILE_LOCK) {
                        lock = lockFile.getChannel().tryLock(0, Math.max(1, lockFile.getChannel().size()), false);
                        if (lock == null) {
                            throw new StoreLockedExcpetion("Kaha Store " + directory.getName() + " is already opened by another application");
                        } else
                            System.setProperty(key, new Date().toString());
                    }
                } else { //already locked
                    throw new StoreLockedExcpetion("Kaha Store " + directory.getName() + " is already opened by this application.");
                }
            }
        }
    }

    private void unlock() throws IOException {
        synchronized (LOCKSET_MONITOR) {
            if (!DISABLE_LOCKING && (null != directory) && (null != lock)) {
                System.getProperties().remove(getPropertyKey());
                if (lock.isValid()) {
                    lock.release();
                }
                lock = null;
            }
        }
    }

    private String getPropertyKey() throws IOException {
        return getClass().getName() + ".lock." + directory.getCanonicalPath();
    }

    /**
     * scans the directory and builds up the IndexManager and DataManager
     *
     * @throws IOException if there is a problem accessing an index or data file
     */
    private void generateInterestInListDataFiles() throws IOException {
        for (Iterator i = listsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            DataManager dm = getDataManager(id.getDataContainerName());
            IndexManager im = getIndexManager(dm, id.getDataContainerName());
            IndexItem theRoot = listsContainer.getRoot(im, id);
            long nextItem = theRoot.getNextItem();
            while (nextItem != Item.POSITION_NOT_SET) {
                IndexItem item = im.getIndex(nextItem);
                item.setOffset(nextItem);
                dm.addInterestInFile(item.getKeyFile());
                dm.addInterestInFile(item.getValueFile());
                nextItem = item.getNextItem();
            }
        }
    }

    /**
     * scans the directory and builds up the IndexManager and DataManager
     *
     * @throws IOException if there is a problem accessing an index or data file
     */
    private void generateInterestInMapDataFiles() throws IOException {
        for (Iterator i = mapsContainer.getKeys().iterator(); i.hasNext();) {
            ContainerId id = (ContainerId)i.next();
            DataManager dm = getDataManager(id.getDataContainerName());
            IndexManager im = getIndexManager(dm, id.getDataContainerName());
            IndexItem theRoot = mapsContainer.getRoot(im, id);
            long nextItem = theRoot.getNextItem();
            while (nextItem != Item.POSITION_NOT_SET) {
                IndexItem item = im.getIndex(nextItem);
                item.setOffset(nextItem);
                dm.addInterestInFile(item.getKeyFile());
                dm.addInterestInFile(item.getValueFile());
                nextItem = item.getNextItem();
            }
        }
    }
}

View File

@ -1,42 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl;
import java.io.IOException;
/**
* Exception thrown if the store is in use by another application
*
*
*/
/**
 * Exception thrown if the store is in use by another application.
 * <p>
 * NOTE: the class name is misspelled ("Excpetion") but is retained unchanged
 * for backward compatibility with existing callers and serialized forms.
 */
public class StoreLockedExcpetion extends IOException {

    private static final long serialVersionUID = 3857646689671366926L;

    /**
     * Default Constructor
     */
    public StoreLockedExcpetion() {
    }

    /**
     * @param s detail message describing which store is locked
     */
    public StoreLockedExcpetion(String s) {
        super(s);
    }

    /**
     * @param s detail message describing which store is locked
     * @param cause underlying cause (e.g. the failed lock attempt); recorded
     *        via initCause so the original failure is not lost
     */
    public StoreLockedExcpetion(String s, Throwable cause) {
        super(s);
        initCause(cause);
    }
}

View File

@ -1,774 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.activemq.kaha.impl.async.DataFileAppender.WriteCommand;
import org.apache.activemq.kaha.impl.async.DataFileAppender.WriteKey;
import org.apache.activemq.thread.Scheduler;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manages the set of append-only journal data files ("DataFiles") used by the
 * AMQ store: allocation of new files, reference counting of live records,
 * recovery of a torn tail write on restart, cleanup/archival of unused files,
 * and sequential iteration over stored records.
 *
 * Thread-safety: public mutating methods are synchronized on this instance.
 */
public class AsyncDataManager {

    public static final int CONTROL_RECORD_MAX_LENGTH = 1024;
    public static final int ITEM_HEAD_RESERVED_SPACE = 21;
    // ITEM_HEAD_SPACE = length + type + reserved space + SOR marker
    public static final int ITEM_HEAD_SPACE = 4 + 1 + ITEM_HEAD_RESERVED_SPACE + 3;
    public static final int ITEM_HEAD_OFFSET_TO_SOR = ITEM_HEAD_SPACE - 3;
    public static final int ITEM_FOOT_SPACE = 3; // EOR marker
    public static final int ITEM_HEAD_FOOT_SPACE = ITEM_HEAD_SPACE + ITEM_FOOT_SPACE;
    public static final byte[] ITEM_HEAD_SOR = new byte[] {'S', 'O', 'R'};
    public static final byte[] ITEM_HEAD_EOR = new byte[] {'E', 'O', 'R'};
    public static final byte DATA_ITEM_TYPE = 1;
    public static final byte REDO_ITEM_TYPE = 2;
    public static final String DEFAULT_DIRECTORY = "data";
    public static final String DEFAULT_ARCHIVE_DIRECTORY = "data-archive";
    public static final String DEFAULT_FILE_PREFIX = "data-";
    public static final int DEFAULT_MAX_FILE_LENGTH = 1024 * 1024 * 32;
    public static final int DEFAULT_CLEANUP_INTERVAL = 1000 * 30;
    public static final int PREFERED_DIFF = 1024 * 512;

    private static final Logger LOG = LoggerFactory.getLogger(AsyncDataManager.class);

    protected Scheduler scheduler;
    // Writes queued in the appender but not yet on disk; readers consult this first.
    protected final Map<WriteKey, WriteCommand> inflightWrites = new ConcurrentHashMap<WriteKey, WriteCommand>();
    protected File directory = new File(DEFAULT_DIRECTORY);
    protected File directoryArchive = new File(DEFAULT_ARCHIVE_DIRECTORY);
    protected String filePrefix = DEFAULT_FILE_PREFIX;
    protected ControlFile controlFile;
    protected boolean started;
    protected boolean useNio = true;
    protected int maxFileLength = DEFAULT_MAX_FILE_LENGTH;
    protected int preferedFileLength = DEFAULT_MAX_FILE_LENGTH - PREFERED_DIFF;
    protected DataFileAppender appender;
    protected DataFileAccessorPool accessorPool;
    protected Map<Integer, DataFile> fileMap = new HashMap<Integer, DataFile>();
    protected Map<File, DataFile> fileByFileMap = new LinkedHashMap<File, DataFile>();
    protected DataFile currentWriteFile;
    protected Location mark;
    protected final AtomicReference<Location> lastAppendLocation = new AtomicReference<Location>();
    protected Runnable cleanupTask;
    protected final AtomicLong storeSize;
    protected boolean archiveDataLogs;

    public AsyncDataManager(AtomicLong storeSize) {
        this.storeSize = storeSize;
    }

    public AsyncDataManager() {
        this(new AtomicLong());
    }

    /**
     * Opens the journal: locks the control file, restores the persisted
     * mark/last-append state, scans the directory for existing data files and
     * links them in id order, validates the tail of the newest file for a
     * partial write, and schedules the periodic cleanup task.
     *
     * @throws IOException if the store cannot be locked or read
     */
    @SuppressWarnings("unchecked")
    public synchronized void start() throws IOException {
        if (started) {
            return;
        }
        started = true;
        preferedFileLength = Math.max(PREFERED_DIFF, getMaxFileLength() - PREFERED_DIFF);
        lock();
        accessorPool = new DataFileAccessorPool(this);
        ByteSequence sequence = controlFile.load();
        if (sequence != null && sequence.getLength() > 0) {
            unmarshallState(sequence);
        }
        if (useNio) {
            appender = new NIODataFileAppender(this);
        } else {
            appender = new DataFileAppender(this);
        }
        File[] files = directory.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String n) {
                return dir.equals(directory) && n.startsWith(filePrefix);
            }
        });
        if (files != null) {
            for (int i = 0; i < files.length; i++) {
                try {
                    File file = files[i];
                    String n = file.getName();
                    String numStr = n.substring(filePrefix.length(), n.length());
                    int num = Integer.parseInt(numStr);
                    DataFile dataFile = new DataFile(file, num, preferedFileLength);
                    fileMap.put(dataFile.getDataFileId(), dataFile);
                    storeSize.addAndGet(dataFile.getLength());
                } catch (NumberFormatException e) {
                    // Ignore files that do not match the data file naming pattern.
                }
            }
            // Sort the list so that we can link the DataFiles together in the
            // right order.
            List<DataFile> l = new ArrayList<DataFile>(fileMap.values());
            Collections.sort(l);
            currentWriteFile = null;
            for (DataFile df : l) {
                if (currentWriteFile != null) {
                    currentWriteFile.linkAfter(df);
                }
                currentWriteFile = df;
                fileByFileMap.put(df.getFile(), df);
            }
        }
        // Need to check the current write file to see if there was a partial
        // (torn) write to it.
        if (currentWriteFile != null) {
            // See if the lastSyncedLocation is valid..
            Location l = lastAppendLocation.get();
            if (l != null && l.getDataFileId() != currentWriteFile.getDataFileId().intValue()) {
                l = null;
            }
            // If we know the last location that was ok.. then we can skip lots
            // of checking
            try {
                l = recoveryCheck(currentWriteFile, l);
                lastAppendLocation.set(l);
            } catch (IOException e) {
                LOG.warn("recovery check failed", e);
            }
        }
        storeState(false);
        cleanupTask = new Runnable() {
            public void run() {
                cleanup();
            }
        };
        this.scheduler = new Scheduler("AsyncDataManager Scheduler");
        try {
            this.scheduler.start();
        } catch (Exception e) {
            IOException ioe = new IOException("scheduler start: " + e);
            ioe.initCause(e);
            throw ioe;
        }
        this.scheduler.executePeriodically(cleanupTask, DEFAULT_CLEANUP_INTERVAL);
    }

    /**
     * Acquires the exclusive store lock via the control file, creating the
     * store directory and control file if needed.
     *
     * @throws IOException if the lock cannot be obtained
     */
    public void lock() throws IOException {
        synchronized (this) {
            if (controlFile == null || controlFile.isDisposed()) {
                IOHelper.mkdirs(directory);
                controlFile = new ControlFile(new File(directory, filePrefix + "control"), CONTROL_RECORD_MAX_LENGTH);
            }
            controlFile.lock();
        }
    }

    /**
     * Scans forward from the given location (or the start of the file) and
     * validates record markers until the first invalid record, truncating the
     * file's logical length to the last good offset.
     *
     * @return the location just past the last valid record
     */
    protected Location recoveryCheck(DataFile dataFile, Location location) throws IOException {
        if (location == null) {
            location = new Location();
            location.setDataFileId(dataFile.getDataFileId());
            location.setOffset(0);
        }
        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        try {
            reader.readLocationDetails(location);
            while (reader.readLocationDetailsAndValidate(location)) {
                location.setOffset(location.getOffset() + location.getSize());
            }
        } finally {
            accessorPool.closeDataFileAccessor(reader);
        }
        dataFile.setLength(location.getOffset());
        return location;
    }

    /** Restores the mark and last-append location from a control record. */
    protected void unmarshallState(ByteSequence sequence) throws IOException {
        ByteArrayInputStream bais = new ByteArrayInputStream(sequence.getData(), sequence.getOffset(), sequence.getLength());
        DataInputStream dis = new DataInputStream(bais);
        if (dis.readBoolean()) {
            mark = new Location();
            mark.readExternal(dis);
        } else {
            mark = null;
        }
        if (dis.readBoolean()) {
            Location l = new Location();
            l.readExternal(dis);
            lastAppendLocation.set(l);
        } else {
            lastAppendLocation.set(null);
        }
    }

    /** Serializes the mark and last-append location into a control record. */
    private synchronized ByteSequence marshallState() throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        if (mark != null) {
            dos.writeBoolean(true);
            mark.writeExternal(dos);
        } else {
            dos.writeBoolean(false);
        }
        Location l = lastAppendLocation.get();
        if (l != null) {
            dos.writeBoolean(true);
            l.writeExternal(dos);
        } else {
            dos.writeBoolean(false);
        }
        byte[] bs = baos.toByteArray();
        return new ByteSequence(bs, 0, bs.length);
    }

    /**
     * Reserves space for a record, rolling over to a freshly preallocated data
     * file when the current one would exceed {@link #getMaxFileLength()}.
     * Fills in the location's file id and offset and bumps the file's
     * reference count.
     */
    synchronized DataFile allocateLocation(Location location) throws IOException {
        if (currentWriteFile == null || ((currentWriteFile.getLength() + location.getSize()) > maxFileLength)) {
            int nextNum = currentWriteFile != null ? currentWriteFile.getDataFileId().intValue() + 1 : 1;
            String fileName = filePrefix + nextNum;
            File file = new File(directory, fileName);
            DataFile nextWriteFile = new DataFile(file, nextNum, preferedFileLength);
            // actually allocate the disk space
            nextWriteFile.closeRandomAccessFile(nextWriteFile.openRandomAccessFile(true));
            fileMap.put(nextWriteFile.getDataFileId(), nextWriteFile);
            fileByFileMap.put(file, nextWriteFile);
            if (currentWriteFile != null) {
                currentWriteFile.linkAfter(nextWriteFile);
                if (currentWriteFile.isUnused()) {
                    removeDataFile(currentWriteFile);
                }
            }
            currentWriteFile = nextWriteFile;
        }
        location.setOffset(currentWriteFile.getLength());
        location.setDataFileId(currentWriteFile.getDataFileId().intValue());
        int size = location.getSize();
        currentWriteFile.incrementLength(size);
        currentWriteFile.increment();
        storeSize.addAndGet(size);
        return currentWriteFile;
    }

    /** Drops one reference to the data file containing the given location. */
    public synchronized void removeLocation(Location location) throws IOException {
        DataFile dataFile = getDataFile(location);
        dataFile.decrement();
    }

    synchronized DataFile getDataFile(Location item) throws IOException {
        Integer key = Integer.valueOf(item.getDataFileId());
        DataFile dataFile = fileMap.get(key);
        if (dataFile == null) {
            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
            throw new IOException("Could not locate data file " + filePrefix + item.getDataFileId());
        }
        return dataFile;
    }

    synchronized File getFile(Location item) throws IOException {
        Integer key = Integer.valueOf(item.getDataFileId());
        DataFile dataFile = fileMap.get(key);
        if (dataFile == null) {
            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
            throw new IOException("Could not locate data file " + filePrefix + item.getDataFileId());
        }
        return dataFile.getFile();
    }

    private DataFile getNextDataFile(DataFile dataFile) {
        return (DataFile)dataFile.getNext();
    }

    /**
     * Shuts down the journal: cancels the cleanup task, persists state,
     * closes the appender and accessor pool, and releases the store lock.
     */
    public synchronized void close() throws IOException {
        if (!started) {
            return;
        }
        this.scheduler.cancel(cleanupTask);
        try {
            this.scheduler.stop();
        } catch (Exception e) {
            IOException ioe = new IOException("scheduler stop: " + e);
            ioe.initCause(e);
            throw ioe;
        }
        accessorPool.close();
        storeState(false);
        appender.close();
        fileMap.clear();
        fileByFileMap.clear();
        controlFile.unlock();
        controlFile.dispose();
        started = false;
    }

    /** Periodic task: releases pooled file accessors that are no longer used. */
    synchronized void cleanup() {
        if (accessorPool != null) {
            accessorPool.disposeUnused();
        }
    }

    /**
     * Deletes all journal data files and resets in-memory state; open file
     * handles are closed first and then reopened empty.
     *
     * @return true if every data file was successfully deleted
     */
    public synchronized boolean delete() throws IOException {
        // Close all open file handles...
        appender.close();
        accessorPool.close();
        boolean result = true;
        for (DataFile dataFile : fileMap.values()) {
            storeSize.addAndGet(-dataFile.getLength());
            result &= dataFile.delete();
        }
        fileMap.clear();
        fileByFileMap.clear();
        lastAppendLocation.set(null);
        mark = null;
        currentWriteFile = null;
        // reopen open file handles...
        accessorPool = new DataFileAccessorPool(this);
        if (useNio) {
            appender = new NIODataFileAppender(this);
        } else {
            appender = new DataFileAppender(this);
        }
        return result;
    }

    public synchronized void addInterestInFile(int file) throws IOException {
        if (file >= 0) {
            Integer key = Integer.valueOf(file);
            DataFile dataFile = fileMap.get(key);
            if (dataFile == null) {
                throw new IOException("That data file does not exist");
            }
            addInterestInFile(dataFile);
        }
    }

    synchronized void addInterestInFile(DataFile dataFile) {
        if (dataFile != null) {
            dataFile.increment();
        }
    }

    public synchronized void removeInterestInFile(int file) throws IOException {
        if (file >= 0) {
            Integer key = Integer.valueOf(file);
            DataFile dataFile = fileMap.get(key);
            removeInterestInFile(dataFile);
        }
    }

    synchronized void removeInterestInFile(DataFile dataFile) throws IOException {
        if (dataFile != null) {
            if (dataFile.decrement() <= 0) {
                removeDataFile(dataFile);
            }
        }
    }

    /**
     * Force-removes every data file whose id is neither in use nor in
     * progress, except the current write file.
     */
    public synchronized void consolidateDataFilesNotIn(Set<Integer> inUse, Set<Integer> inProgress) throws IOException {
        Set<Integer> unUsed = new HashSet<Integer>(fileMap.keySet());
        unUsed.removeAll(inUse);
        unUsed.removeAll(inProgress);
        List<DataFile> purgeList = new ArrayList<DataFile>();
        for (Integer key : unUsed) {
            DataFile dataFile = fileMap.get(key);
            purgeList.add(dataFile);
        }
        for (DataFile dataFile : purgeList) {
            // NOTE: must use equals() here - these are boxed Integers, and a
            // reference (!=) comparison fails for ids outside the Integer
            // cache (>= 128), which could delete the active write file.
            if (!dataFile.getDataFileId().equals(currentWriteFile.getDataFileId())) {
                forceRemoveDataFile(dataFile);
            }
        }
    }

    /**
     * Force-removes every unused data file whose id is lower than lastFile.
     */
    public synchronized void consolidateDataFilesNotIn(Set<Integer> inUse, Integer lastFile) throws IOException {
        Set<Integer> unUsed = new HashSet<Integer>(fileMap.keySet());
        unUsed.removeAll(inUse);
        List<DataFile> purgeList = new ArrayList<DataFile>();
        for (Integer key : unUsed) {
            // Only add files less than the lastFile..
            if (key.intValue() < lastFile.intValue()) {
                DataFile dataFile = fileMap.get(key);
                purgeList.add(dataFile);
            }
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("lastFileId=" + lastFile + ", purgeList: (" + purgeList.size() + ") " + purgeList);
        }
        for (DataFile dataFile : purgeList) {
            forceRemoveDataFile(dataFile);
        }
    }

    /** Removes (subject to the mark guard) every file with no live references. */
    public synchronized void consolidateDataFiles() throws IOException {
        List<DataFile> purgeList = new ArrayList<DataFile>();
        for (DataFile dataFile : fileMap.values()) {
            if (dataFile.isUnused()) {
                purgeList.add(dataFile);
            }
        }
        for (DataFile dataFile : purgeList) {
            removeDataFile(dataFile);
        }
    }

    private synchronized void removeDataFile(DataFile dataFile) throws IOException {
        // Make sure we don't delete too much data: never the current write
        // file, and never at or past the mark.
        if (dataFile == currentWriteFile || mark == null || dataFile.getDataFileId() >= mark.getDataFileId()) {
            LOG.debug("Won't remove DataFile" + dataFile);
            return;
        }
        forceRemoveDataFile(dataFile);
    }

    private synchronized void forceRemoveDataFile(DataFile dataFile)
        throws IOException {
        accessorPool.disposeDataFileAccessors(dataFile);
        fileByFileMap.remove(dataFile.getFile());
        fileMap.remove(dataFile.getDataFileId());
        storeSize.addAndGet(-dataFile.getLength());
        dataFile.unlink();
        if (archiveDataLogs) {
            dataFile.move(getDirectoryArchive());
            LOG.debug("moved data file " + dataFile + " to "
                    + getDirectoryArchive());
        } else {
            boolean result = dataFile.delete();
            if (!result) {
                LOG.info("Failed to discard data file " + dataFile);
            }
        }
    }

    /**
     * @return the maxFileLength
     */
    public int getMaxFileLength() {
        return maxFileLength;
    }

    /**
     * @param maxFileLength the maxFileLength to set
     */
    public void setMaxFileLength(int maxFileLength) {
        this.maxFileLength = maxFileLength;
    }

    @Override
    public String toString() {
        return "DataManager:(" + filePrefix + ")";
    }

    public synchronized Location getMark() throws IllegalStateException {
        return mark;
    }

    /**
     * Returns the next user record location after the given one (or the first
     * record when location is null), skipping non-user records and rolling
     * into subsequent files; null when the end of the journal is reached.
     */
    public synchronized Location getNextLocation(Location location) throws IOException, IllegalStateException {
        Location cur = null;
        while (true) {
            if (cur == null) {
                if (location == null) {
                    DataFile head = (DataFile)currentWriteFile.getHeadNode();
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    if (location.getSize() == -1) {
                        cur = new Location(location);
                    } else {
                        cur = new Location(location);
                        cur.setOffset(location.getOffset() + location.getSize());
                    }
                }
            } else {
                cur.setOffset(cur.getOffset() + cur.getSize());
            }
            DataFile dataFile = getDataFile(cur);
            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                dataFile = getNextDataFile(dataFile);
                if (dataFile == null) {
                    return null;
                } else {
                    cur.setDataFileId(dataFile.getDataFileId().intValue());
                    cur.setOffset(0);
                }
            }
            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }
            if (cur.getType() == 0) {
                return null;
            } else if (cur.getType() > 0) {
                // Only return user records.
                return cur;
            }
        }
    }

    public synchronized Location getNextLocation(File file, Location lastLocation, boolean thisFileOnly) throws IllegalStateException, IOException {
        DataFile df = fileByFileMap.get(file);
        return getNextLocation(df, lastLocation, thisFileOnly);
    }

    /**
     * Like {@link #getNextLocation(Location)} but starting from a specific
     * data file; when thisFileOnly is true the scan never crosses into the
     * next file.
     */
    public synchronized Location getNextLocation(DataFile dataFile,
            Location lastLocation, boolean thisFileOnly) throws IOException, IllegalStateException {
        Location cur = null;
        while (true) {
            if (cur == null) {
                if (lastLocation == null) {
                    DataFile head = (DataFile)dataFile.getHeadNode();
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    cur = new Location(lastLocation);
                    cur.setOffset(cur.getOffset() + cur.getSize());
                }
            } else {
                cur.setOffset(cur.getOffset() + cur.getSize());
            }
            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                if (thisFileOnly) {
                    return null;
                } else {
                    dataFile = getNextDataFile(dataFile);
                    if (dataFile == null) {
                        return null;
                    } else {
                        cur.setDataFileId(dataFile.getDataFileId().intValue());
                        cur.setOffset(0);
                    }
                }
            }
            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }
            if (cur.getType() == 0) {
                return null;
            } else if (cur.getType() > 0) {
                // Only return user records.
                return cur;
            }
        }
    }

    /** Reads the record payload stored at the given location. */
    public synchronized ByteSequence read(Location location) throws IOException, IllegalStateException {
        DataFile dataFile = getDataFile(location);
        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        ByteSequence rc = null;
        try {
            rc = reader.readRecord(location);
        } finally {
            accessorPool.closeDataFileAccessor(reader);
        }
        return rc;
    }

    /** Advances the cleanup mark and persists the new state. */
    public void setMark(Location location, boolean sync) throws IOException, IllegalStateException {
        synchronized (this) {
            mark = location;
        }
        storeState(sync);
    }

    /** Persists the current mark/last-append state to journal and control file. */
    protected synchronized void storeState(boolean sync) throws IOException {
        ByteSequence state = marshallState();
        appender.storeItem(state, Location.MARK_TYPE, sync);
        controlFile.store(state, sync);
    }

    public synchronized Location write(ByteSequence data, boolean sync) throws IOException, IllegalStateException {
        Location loc = appender.storeItem(data, Location.USER_TYPE, sync);
        return loc;
    }

    public synchronized Location write(ByteSequence data, Runnable onComplete) throws IOException, IllegalStateException {
        Location loc = appender.storeItem(data, Location.USER_TYPE, onComplete);
        return loc;
    }

    public synchronized Location write(ByteSequence data, byte type, boolean sync) throws IOException, IllegalStateException {
        return appender.storeItem(data, type, sync);
    }

    /** Overwrites an existing record's payload in place. */
    public void update(Location location, ByteSequence data, boolean sync) throws IOException {
        DataFile dataFile = getDataFile(location);
        DataFileAccessor updater = accessorPool.openDataFileAccessor(dataFile);
        try {
            updater.updateRecord(location, data, sync);
        } finally {
            accessorPool.closeDataFileAccessor(updater);
        }
    }

    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public String getFilePrefix() {
        return filePrefix;
    }

    public void setFilePrefix(String filePrefix) {
        this.filePrefix = IOHelper.toFileSystemSafeName(filePrefix);
    }

    public Map<WriteKey, WriteCommand> getInflightWrites() {
        return inflightWrites;
    }

    public Location getLastAppendLocation() {
        return lastAppendLocation.get();
    }

    public void setLastAppendLocation(Location lastSyncedLocation) {
        this.lastAppendLocation.set(lastSyncedLocation);
    }

    public boolean isUseNio() {
        return useNio;
    }

    public void setUseNio(boolean useNio) {
        this.useNio = useNio;
    }

    public File getDirectoryArchive() {
        return directoryArchive;
    }

    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    public boolean isArchiveDataLogs() {
        return archiveDataLogs;
    }

    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /** @return the id of the current write file, or null before first write */
    synchronized public Integer getCurrentDataFileId() {
        if (currentWriteFile == null)
            return null;
        return currentWriteFile.getDataFileId();
    }

    /**
     * Get a set of files - only valid after start()
     * @return files currently being used
     */
    public Set<File> getFiles() {
        return fileByFileMap.keySet();
    }

    /** @return total on-disk size of all linked data files */
    synchronized public long getDiskSize() {
        long rc = 0;
        DataFile cur = (DataFile)currentWriteFile.getHeadNode();
        while (cur != null) {
            rc += cur.getLength();
            cur = (DataFile) cur.getNext();
        }
        return rc;
    }

    /** @return on-disk size of all data up to (but excluding) startPosition */
    synchronized public long getDiskSizeUntil(Location startPosition) {
        long rc = 0;
        DataFile cur = (DataFile)currentWriteFile.getHeadNode();
        while (cur != null) {
            if (cur.getDataFileId().intValue() >= startPosition.getDataFileId()) {
                return rc + startPosition.getOffset();
            }
            rc += cur.getLength();
            cur = (DataFile) cur.getNext();
        }
        return rc;
    }
}

View File

@ -1,186 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.IOExceptionSupport;
/**
 * Use to reliably store fixed sized state data. It stores the state in a
 * record that is versioned and repeated twice in the file so that a failure in
 * the middle of the write of the first or second record does not result in an
 * unknown state: on load, whichever copy has a matching version stamp before
 * and after its payload is taken as authoritative.
 */
public final class ControlFile {

    // Escape hatch for platforms with broken java.nio file locking.
    private static final boolean DISABLE_FILE_LOCK = "true".equals(System.getProperty("java.nio.channels.FileLock.broken", "false"));
    private final File file;

    /** The File that holds the control data. */
    private final RandomAccessFile randomAccessFile;
    private final int maxRecordSize;
    private final int firstRecordStart;
    private final int secondRecordStart;
    private final int firstRecordEnd;
    private final int secondRecordEnd;
    private long version;
    private FileLock lock;
    private boolean disposed;

    /**
     * @param file the file that backs the control records
     * @param recordSize maximum payload size of one state record
     * @throws IOException if the file cannot be opened read/write
     */
    public ControlFile(File file, int recordSize) throws IOException {
        this.file = file;
        this.maxRecordSize = recordSize + 4;
        // Calculate where the records start and end. Each record is framed by
        // an 8-byte version stamp before and after its payload.
        this.firstRecordStart = 8;
        this.secondRecordStart = 8 + maxRecordSize + 8 + 8;
        this.firstRecordEnd = firstRecordStart + maxRecordSize;
        this.secondRecordEnd = secondRecordStart + maxRecordSize;
        randomAccessFile = new RandomAccessFile(file, "rw");
    }

    /**
     * Locks the control file.
     *
     * @throws IOException if another process already holds the lock
     */
    public void lock() throws IOException {
        if (DISABLE_FILE_LOCK) {
            return;
        }
        if (lock == null) {
            try {
                lock = randomAccessFile.getChannel().tryLock(0, Math.max(1, randomAccessFile.getChannel().size()), false);
            } catch (OverlappingFileLockException e) {
                throw IOExceptionSupport.create("Control file '" + file + "' could not be locked.", e);
            }
            if (lock == null) {
                throw new IOException("Control file '" + file + "' could not be locked.");
            }
        }
    }

    /**
     * Un locks the control file.
     *
     * @throws IOException
     */
    public void unlock() throws IOException {
        if (DISABLE_FILE_LOCK) {
            return;
        }
        if (lock != null) {
            lock.release();
            lock = null;
        }
    }

    /** Releases the lock (best-effort) and closes the underlying file. */
    public void dispose() {
        if (disposed) {
            return;
        }
        disposed = true;
        try {
            unlock();
        } catch (IOException ignore) {
        }
        try {
            randomAccessFile.close();
        } catch (IOException ignore) {
        }
    }

    /**
     * Loads the most recent intact state record.
     *
     * @return the state payload, or null if the file is too short to hold one
     * @throws IOException if both record copies are corrupted
     */
    public synchronized ByteSequence load() throws IOException {
        long l = randomAccessFile.length();
        if (l < maxRecordSize) {
            return null;
        }
        randomAccessFile.seek(firstRecordStart - 8);
        long v1 = randomAccessFile.readLong();
        randomAccessFile.seek(firstRecordEnd);
        long v1check = randomAccessFile.readLong();
        randomAccessFile.seek(secondRecordStart - 8);
        long v2 = randomAccessFile.readLong();
        randomAccessFile.seek(secondRecordEnd);
        long v2check = randomAccessFile.readLong();
        byte[] data = null;
        // Prefer the second (newer) copy when its version stamps agree.
        if (v2 == v2check) {
            version = v2;
            randomAccessFile.seek(secondRecordStart);
            int size = randomAccessFile.readInt();
            data = new byte[size];
            randomAccessFile.readFully(data);
        } else if (v1 == v1check) {
            version = v1;
            randomAccessFile.seek(firstRecordStart);
            int size = randomAccessFile.readInt();
            data = new byte[size];
            randomAccessFile.readFully(data);
        } else {
            // Bummer.. Both checks are screwed. we don't know
            // if any of the two buffers are ok. This should
            // only happen if data got corrupted.
            throw new IOException("Control data corrupted.");
        }
        return new ByteSequence(data, 0, data.length);
    }

    /**
     * Stores the state payload as two version-stamped copies.
     *
     * @param data the payload; only the [offset, offset+length) slice is written
     * @param sync whether to fsync before returning
     */
    public void store(ByteSequence data, boolean sync) throws IOException {
        version++;
        randomAccessFile.setLength((maxRecordSize * 2) + 32);
        randomAccessFile.seek(0);
        // Write the first copy of the control data.
        // NOTE: write only the ByteSequence's offset/length slice; writing
        // data.getData() outright would dump the whole backing array and
        // persist garbage for sequences with a non-zero offset.
        randomAccessFile.writeLong(version);
        randomAccessFile.writeInt(data.getLength());
        randomAccessFile.write(data.getData(), data.getOffset(), data.getLength());
        randomAccessFile.seek(firstRecordEnd);
        randomAccessFile.writeLong(version);
        // Write the second copy of the control data.
        randomAccessFile.writeLong(version);
        randomAccessFile.writeInt(data.getLength());
        randomAccessFile.write(data.getData(), data.getOffset(), data.getLength());
        randomAccessFile.seek(secondRecordEnd);
        randomAccessFile.writeLong(version);
        if (sync) {
            randomAccessFile.getFD().sync();
        }
    }

    public boolean isDisposed() {
        return disposed;
    }
}

View File

@ -1,142 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.LinkedNode;
/**
 * A single journal data file: tracks its on-disk length and a reference count
 * of live records, and participates in a doubly-linked list of files (via
 * LinkedNode) ordered by file id.
 */
public class DataFile extends LinkedNode implements Comparable<DataFile> {

    protected final File file;
    protected final Integer dataFileId;
    // Preallocation target so the OS can lay the file out contiguously.
    protected final int preferedSize;

    protected int length;
    // Number of live records referencing this file; <= 0 means removable.
    protected int referenceCount;

    DataFile(File file, int number, int preferedSize) {
        this.file = file;
        this.preferedSize = preferedSize;
        this.dataFileId = Integer.valueOf(number);
        length = (int)(file.exists() ? file.length() : 0);
    }

    File getFile() {
        return file;
    }

    public Integer getDataFileId() {
        return dataFileId;
    }

    public synchronized int getLength() {
        return length;
    }

    // Synchronized for consistency with getLength()/incrementLength() so
    // recovery-time truncation is visible to other threads.
    public synchronized void setLength(int length) {
        this.length = length;
    }

    public synchronized void incrementLength(int size) {
        length += size;
    }

    public synchronized int increment() {
        return ++referenceCount;
    }

    public synchronized int decrement() {
        return --referenceCount;
    }

    public synchronized int getReferenceCount() {
        return referenceCount;
    }

    public synchronized boolean isUnused() {
        return referenceCount <= 0;
    }

    public synchronized String toString() {
        String result = file.getName() + " number = " + dataFileId + " , length = " + length + " refCount = " + referenceCount;
        return result;
    }

    /**
     * Opens the underlying file; in appender mode the file is preallocated up
     * to the preferred size so the OS can allocate it contiguously.
     */
    public synchronized RandomAccessFile openRandomAccessFile(boolean appender) throws IOException {
        RandomAccessFile rc = new RandomAccessFile(file, "rw");
        if (appender) {
            if (length < preferedSize) {
                try {
                    // this can throw if we run out of disk space
                    rc.setLength(preferedSize);
                } catch (IOException ioe) {
                    try {
                        rc.close();
                    } catch (Exception ignored) {
                    }
                    throw ioe;
                }
            }
        }
        return rc;
    }

    public synchronized void closeRandomAccessFile(RandomAccessFile file) throws IOException {
        // On close set the file size to the real size (undo preallocation).
        if (length != file.length()) {
            file.setLength(getLength());
        }
        file.close();
    }

    public synchronized boolean delete() throws IOException {
        return file.delete();
    }

    public synchronized void move(File targetDirectory) throws IOException {
        IOHelper.moveFile(file, targetDirectory);
    }

    public int compareTo(DataFile df) {
        // Use Integer.compareTo rather than int subtraction, which can
        // overflow for extreme id values.
        return dataFileId.compareTo(df.dataFileId);
    }

    @Override
    public boolean equals(Object o) {
        boolean result = false;
        if (o instanceof DataFile) {
            result = compareTo((DataFile)o) == 0;
        }
        return result;
    }

    @Override
    public int hashCode() {
        // Integer.hashCode() is the int value itself, consistent with equals.
        return dataFileId.hashCode();
    }
}

View File

@ -1,154 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Map;
import org.apache.activemq.kaha.impl.async.DataFileAppender.WriteCommand;
import org.apache.activemq.kaha.impl.async.DataFileAppender.WriteKey;
import org.apache.activemq.util.ByteSequence;
/**
 * Optimized store reader and updater for a single DataFile. Single threaded
 * and synchronous; obtain instances through DataFileAccessorPool for
 * concurrent use. Reads consult the appender's in-flight write map first so
 * records queued but not yet on disk are still visible.
 */
final class DataFileAccessor {

    private final DataFile dataFile;
    // Writes still queued in the async appender, keyed by location.
    private final Map<WriteKey, WriteCommand> inflightWrites;
    private final RandomAccessFile file;
    private boolean disposed;

    /**
     * Construct a Store reader for the given data file.
     *
     * @param dataManager owning manager; supplies the in-flight write map
     * @param dataFile the data file to read and update
     * @throws IOException if the underlying file cannot be opened
     */
    public DataFileAccessor(AsyncDataManager dataManager, DataFile dataFile) throws IOException {
        this.dataFile = dataFile;
        this.inflightWrites = dataManager.getInflightWrites();
        this.file = dataFile.openRandomAccessFile(false);
    }

    public DataFile getDataFile() {
        return dataFile;
    }

    /** Closes the underlying file handle; safe to call more than once. */
    public void dispose() {
        if (disposed) {
            return;
        }
        disposed = true;
        try {
            dataFile.closeRandomAccessFile(file);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Reads the payload of the record at the given location, serving it from
     * the in-flight write map when the record has not yet reached disk.
     *
     * @throws IOException if the location is invalid or unreadable
     */
    public ByteSequence readRecord(Location location) throws IOException {
        if (!location.isValid()) {
            throw new IOException("Invalid location: " + location);
        }
        WriteCommand asyncWrite = (WriteCommand)inflightWrites.get(new WriteKey(location));
        if (asyncWrite != null) {
            return asyncWrite.data;
        }
        try {
            if (location.getSize() == Location.NOT_SET) {
                // Size unknown: read it from the record header first.
                file.seek(location.getOffset());
                location.setSize(file.readInt());
                file.seek(location.getOffset() + AsyncDataManager.ITEM_HEAD_SPACE);
            } else {
                file.seek(location.getOffset() + AsyncDataManager.ITEM_HEAD_SPACE);
            }
            // Payload size excludes the record's head and foot framing.
            byte[] data = new byte[location.getSize() - AsyncDataManager.ITEM_HEAD_FOOT_SPACE];
            file.readFully(data);
            return new ByteSequence(data, 0, data.length);
        } catch (RuntimeException e) {
            throw new IOException("Invalid location: " + location + ", : " + e);
        }
    }

    /** Fills in the size and type fields of the location from the record header. */
    public void readLocationDetails(Location location) throws IOException {
        WriteCommand asyncWrite = (WriteCommand)inflightWrites.get(new WriteKey(location));
        if (asyncWrite != null) {
            location.setSize(asyncWrite.location.getSize());
            location.setType(asyncWrite.location.getType());
        } else {
            file.seek(location.getOffset());
            location.setSize(file.readInt());
            location.setType(file.readByte());
        }
    }

    /**
     * Like readLocationDetails but additionally verifies the record's SOR/EOR
     * framing markers on disk.
     *
     * @return false if the record is torn/corrupted or cannot be read
     */
    public boolean readLocationDetailsAndValidate(Location location) {
        try {
            WriteCommand asyncWrite = (WriteCommand)inflightWrites.get(new WriteKey(location));
            if (asyncWrite != null) {
                // Still in memory; trust the queued write's metadata.
                location.setSize(asyncWrite.location.getSize());
                location.setType(asyncWrite.location.getType());
            } else {
                file.seek(location.getOffset());
                location.setSize(file.readInt());
                location.setType(file.readByte());
                byte data[] = new byte[3];
                // Validate the start-of-record marker.
                file.seek(location.getOffset() + AsyncDataManager.ITEM_HEAD_OFFSET_TO_SOR);
                file.readFully(data);
                if (data[0] != AsyncDataManager.ITEM_HEAD_SOR[0]
                        || data[1] != AsyncDataManager.ITEM_HEAD_SOR[1]
                        || data[2] != AsyncDataManager.ITEM_HEAD_SOR[2]) {
                    return false;
                }
                // Validate the end-of-record marker.
                file.seek(location.getOffset() + location.getSize() - AsyncDataManager.ITEM_FOOT_SPACE);
                file.readFully(data);
                if (data[0] != AsyncDataManager.ITEM_HEAD_EOR[0]
                        || data[1] != AsyncDataManager.ITEM_HEAD_EOR[1]
                        || data[2] != AsyncDataManager.ITEM_HEAD_EOR[2]) {
                    return false;
                }
            }
        } catch (IOException e) {
            return false;
        }
        return true;
    }

    /**
     * Overwrites an existing record's payload in place, truncating the input
     * to the record's original size.
     *
     * @param sync whether to fsync before returning
     */
    public void updateRecord(Location location, ByteSequence data, boolean sync) throws IOException {
        file.seek(location.getOffset() + AsyncDataManager.ITEM_HEAD_SPACE);
        int size = Math.min(data.getLength(), location.getSize());
        file.write(data.getData(), data.getOffset(), size);
        if (sync) {
            file.getFD().sync();
        }
    }
}

View File

@ -1,163 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
 * Used to pool DataFileAccessors so that concurrent readers of the same data
 * file share a bounded number of open file handles.
 *
 * @author chirino
 */
public class DataFileAccessorPool {

    private final AsyncDataManager dataManager;
    /** One accessor pool per data file id. */
    private final Map<Integer, Pool> pools = new HashMap<Integer, Pool>();
    private boolean closed;
    /** Upper bound on idle accessors kept open per data file. */
    private int maxOpenReadersPerFile = 5;

    /** Pool of accessors for a single data file. */
    class Pool {
        private final DataFile file;
        private final List<DataFileAccessor> pool = new ArrayList<DataFileAccessor>();
        /** Set on every checkout; cleared/inspected by the unused-pool sweep. */
        private boolean used;
        /** Number of accessors currently checked out. */
        private int openCounter;
        private boolean disposed;

        public Pool(DataFile file) {
            this.file = file;
        }

        // NOTE(review): unlike the other members this method is not
        // synchronized; callers currently serialize access through the outer
        // pool's lock (openDataFileAccessor) — confirm before reusing elsewhere.
        public DataFileAccessor openDataFileReader() throws IOException {
            DataFileAccessor rc;
            if (pool.isEmpty()) {
                rc = new DataFileAccessor(dataManager, file);
            } else {
                // List is already typed; no cast needed.
                rc = pool.remove(pool.size() - 1);
            }
            used = true;
            openCounter++;
            return rc;
        }

        /** Returns an accessor; disposes it when the idle cache is full or the pool is dead. */
        public synchronized void closeDataFileReader(DataFileAccessor reader) {
            openCounter--;
            if (pool.size() >= maxOpenReadersPerFile || disposed) {
                reader.dispose();
            } else {
                pool.add(reader);
            }
        }

        public synchronized void clearUsedMark() {
            used = false;
        }

        public synchronized boolean isUsed() {
            return used;
        }

        /** Disposes all idle accessors and marks the pool dead. */
        public synchronized void dispose() {
            for (DataFileAccessor reader : pool) {
                reader.dispose();
            }
            pool.clear();
            disposed = true;
        }

        public synchronized int getOpenCounter() {
            return openCounter;
        }
    }

    public DataFileAccessorPool(AsyncDataManager dataManager) {
        this.dataManager = dataManager;
    }

    /** Clears the used mark on every per-file pool (start of a sweep cycle). */
    synchronized void clearUsedMark() {
        for (Pool pool : pools.values()) {
            pool.clearUsedMark();
        }
    }

    /** Disposes and removes every per-file pool not used since the last sweep. */
    synchronized void disposeUnused() {
        // Explicit iterator needed: we remove entries while iterating.
        for (Iterator<Pool> iter = pools.values().iterator(); iter.hasNext();) {
            Pool pool = iter.next();
            if (!pool.isUsed()) {
                pool.dispose();
                iter.remove();
            }
        }
    }

    /**
     * Disposes the pool for a specific data file.
     *
     * @throws IllegalStateException if this pool is closed or the file still
     *                               has checked-out accessors
     */
    synchronized void disposeDataFileAccessors(DataFile dataFile) {
        if (closed) {
            throw new IllegalStateException("Closed.");
        }
        Pool pool = pools.get(dataFile.getDataFileId());
        if (pool != null) {
            if (pool.getOpenCounter() == 0) {
                pool.dispose();
                pools.remove(dataFile.getDataFileId());
            } else {
                throw new IllegalStateException("The data file is still in use: " + dataFile + ", use count: " + pool.getOpenCounter());
            }
        }
    }

    /**
     * Checks out an accessor for the given data file, creating the per-file
     * pool on first use.
     *
     * @throws IOException if this pool is closed or the accessor cannot be opened
     */
    synchronized DataFileAccessor openDataFileAccessor(DataFile dataFile) throws IOException {
        if (closed) {
            throw new IOException("Closed.");
        }
        Pool pool = pools.get(dataFile.getDataFileId());
        if (pool == null) {
            pool = new Pool(dataFile);
            pools.put(dataFile.getDataFileId(), pool);
        }
        return pool.openDataFileReader();
    }

    /** Returns an accessor to its per-file pool, or disposes it if the pool is gone. */
    synchronized void closeDataFileAccessor(DataFileAccessor reader) {
        Pool pool = pools.get(reader.getDataFile().getDataFileId());
        if (pool == null || closed) {
            reader.dispose();
        } else {
            pool.closeDataFileReader(reader);
        }
    }

    /** Disposes every per-file pool and rejects further use. Idempotent. */
    public synchronized void close() {
        if (closed) {
            return;
        }
        closed = true;
        for (Pool pool : pools.values()) {
            pool.dispose();
        }
        pools.clear();
    }
}

View File

@ -1,440 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.RandomAccessFile;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.LinkedNode;
/**
 * An optimized writer to do batch appends to a data file. This object is thread
 * safe and gains throughput as you increase the number of concurrent writes it
 * does. Producers enqueue WriteCommands which a single background writer thread
 * groups into WriteBatches, writes, and (when requested) forces to disk.
 */
class DataFileAppender {

    // Zero-filled filler written into the reserved portion of every record header.
    protected static final byte[] RESERVED_SPACE = new byte[AsyncDataManager.ITEM_HEAD_RESERVED_SPACE];
    protected static final int DEFAULT_MAX_BATCH_SIZE = 1024 * 1024 * 4;

    protected final AsyncDataManager dataManager;
    // Writes accepted but not yet on disk; readers consult this map first.
    protected final Map<WriteKey, WriteCommand> inflightWrites;
    // Guards nextWriteBatch/shutdown/running/firstAsyncException and is the
    // wait/notify rendezvous between producers and the single writer thread.
    protected final Object enqueueMutex = new Object(){};
    // The batch currently being assembled; null once the writer thread takes it.
    protected WriteBatch nextWriteBatch;
    protected boolean shutdown;
    // First I/O failure seen by the writer thread; rethrown to later producers.
    protected IOException firstAsyncException;
    protected final CountDownLatch shutdownDone = new CountDownLatch(1);
    protected int maxWriteBatchSize = DEFAULT_MAX_BATCH_SIZE;
    protected boolean running;
    private Thread thread;

    /**
     * Identifies a record by data file id and offset; used as the key of the
     * in-flight write map.
     */
    public static class WriteKey {
        private final int file;
        private final long offset;
        private final int hash;

        public WriteKey(Location item) {
            file = item.getDataFileId();
            offset = item.getOffset();
            // TODO: see if we can build a better hash
            hash = (int)(file ^ offset);
        }

        public int hashCode() {
            return hash;
        }

        public boolean equals(Object obj) {
            if (obj instanceof WriteKey) {
                WriteKey di = (WriteKey)obj;
                return di.file == file && di.offset == offset;
            }
            return false;
        }
    }

    /**
     * A group of writes destined for the same data file that are written (and
     * force()d) together; all waiters block on the shared latch.
     */
    public class WriteBatch {

        public final DataFile dataFile;
        // Head of the linked list of WriteCommands in this batch.
        public final WriteCommand first;
        // Released by the writer thread once the whole batch is on disk.
        public final CountDownLatch latch = new CountDownLatch(1);
        public int size;
        public AtomicReference<IOException> exception = new AtomicReference<IOException>();

        public WriteBatch(DataFile dataFile, WriteCommand write) throws IOException {
            this.dataFile = dataFile;
            this.first = write;
            size += write.location.getSize();
        }

        // A write may join this batch only if it targets the same data file
        // and does not push the batch past maxWriteBatchSize.
        public boolean canAppend(DataFile dataFile, WriteCommand write) {
            if (dataFile != this.dataFile) {
                return false;
            }
            if (size + write.location.getSize() >= maxWriteBatchSize) {
                return false;
            }
            return true;
        }

        public void append(WriteCommand write) throws IOException {
            this.first.getTailNode().linkAfter(write);
            size += write.location.getSize();
        }
    }

    /**
     * A single queued write: target location, payload, and either a sync flag
     * (caller blocks on the batch latch) or an onComplete callback (async ack).
     */
    public static class WriteCommand extends LinkedNode {
        public final Location location;
        public final ByteSequence data;
        final boolean sync;
        public final Runnable onComplete;

        public WriteCommand(Location location, ByteSequence data, boolean sync) {
            this.location = location;
            this.data = data;
            this.sync = sync;
            this.onComplete=null;
        }

        public WriteCommand(Location location, ByteSequence data, Runnable onComplete) {
            this.location = location;
            this.data = data;
            this.onComplete = onComplete;
            this.sync = false;
        }
    }

    /**
     * Construct a Store writer bound to the given data manager.
     *
     * @param dataManager owner of the data files and the in-flight write map
     */
    public DataFileAppender(AsyncDataManager dataManager) {
        this.dataManager = dataManager;
        this.inflightWrites = this.dataManager.getInflightWrites();
    }

    /**
     * Stores an item, optionally blocking until it has been forced to disk.
     *
     * @param data the record payload
     * @param type the record type byte
     * @param sync when true, block until the batch containing this write has
     *             been synced; any batch I/O failure is rethrown here
     * @return the location assigned to the record
     * @throws IOException if the write fails or the appender is shut down
     */
    public Location storeItem(ByteSequence data, byte type, boolean sync) throws IOException {
        // Write the packet our internal buffer.
        int size = data.getLength() + AsyncDataManager.ITEM_HEAD_FOOT_SPACE;
        final Location location = new Location();
        location.setSize(size);
        location.setType(type);
        WriteBatch batch;
        WriteCommand write = new WriteCommand(location, data, sync);
        // Locate datafile and enqueue into the executor in sychronized block so
        // that writes get equeued onto the executor in order that they were assigned
        // by the data manager (which is basically just appending)
        synchronized (this) {
            // Find the position where this item will land at.
            DataFile dataFile = dataManager.allocateLocation(location);
            if( !sync ) {
                // Only non-sync writes go into the in-flight cache: sync
                // callers wait for disk anyway.
                inflightWrites.put(new WriteKey(location), write);
            }
            batch = enqueue(dataFile, write);
        }
        location.setLatch(batch.latch);
        if (sync) {
            try {
                batch.latch.await();
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            }
            // Surface the writer thread's failure, if any, to this caller.
            IOException exception = batch.exception.get();
            if (exception != null) {
                throw exception;
            }
        }
        return location;
    }

    /**
     * Stores an item asynchronously; onComplete runs on the writer thread once
     * the record has been forced to disk.
     *
     * @param data the record payload
     * @param type the record type byte
     * @param onComplete callback invoked after the batch containing this write
     *                   is on disk (exceptions from it are printed and swallowed)
     * @return the location assigned to the record
     * @throws IOException if the write cannot be enqueued
     */
    public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException {
        // Write the packet our internal buffer.
        int size = data.getLength() + AsyncDataManager.ITEM_HEAD_FOOT_SPACE;
        final Location location = new Location();
        location.setSize(size);
        location.setType(type);
        WriteBatch batch;
        WriteCommand write = new WriteCommand(location, data, onComplete);
        // Locate datafile and enqueue into the executor in sychronized block so
        // that writes get equeued onto the executor in order that they were assigned
        // by the data manager (which is basically just appending)
        synchronized (this) {
            // Find the position where this item will land at.
            DataFile dataFile = dataManager.allocateLocation(location);
            inflightWrites.put(new WriteKey(location), write);
            batch = enqueue(dataFile, write);
        }
        location.setLatch(batch.latch);
        return location;
    }

    /**
     * Adds a write to the pending batch, starting the writer thread lazily and
     * opening a new batch when the current one is full or taken.
     *
     * @return the batch the write was placed in (its latch signals completion)
     * @throws IOException on shutdown or a previously recorded async failure
     */
    private WriteBatch enqueue(DataFile dataFile, WriteCommand write) throws IOException {
        synchronized (enqueueMutex) {
            WriteBatch rc = null;
            if (shutdown) {
                throw new IOException("Async Writter Thread Shutdown");
            }
            if (!running) {
                // Lazily start the single background writer thread.
                running = true;
                thread = new Thread() {
                    public void run() {
                        processQueue();
                    }
                };
                thread.setPriority(Thread.MAX_PRIORITY);
                thread.setDaemon(true);
                thread.setName("ActiveMQ Data File Writer");
                thread.start();
                firstAsyncException = null;
            }
            // Propagate any failure the writer thread already recorded.
            if (firstAsyncException != null) {
                throw firstAsyncException;
            }
            if (nextWriteBatch == null) {
                nextWriteBatch = new WriteBatch(dataFile, write);
                rc = nextWriteBatch;
                enqueueMutex.notify();
            } else {
                // Append to current batch if possible..
                if (nextWriteBatch.canAppend(dataFile, write)) {
                    nextWriteBatch.append(write);
                    rc = nextWriteBatch;
                } else {
                    // Otherwise wait for the queuedCommand to be null
                    try {
                        while (nextWriteBatch != null) {
                            enqueueMutex.wait();
                        }
                    } catch (InterruptedException e) {
                        throw new InterruptedIOException();
                    }
                    if (shutdown) {
                        throw new IOException("Async Writter Thread Shutdown");
                    }
                    // Start a new batch.
                    nextWriteBatch = new WriteBatch(dataFile, write);
                    rc = nextWriteBatch;
                    enqueueMutex.notify();
                }
            }
            return rc;
        }
    }

    /**
     * Signals shutdown and waits for the writer thread to drain and exit.
     * Idempotent; safe if the writer thread never started.
     */
    public void close() throws IOException {
        synchronized (enqueueMutex) {
            if (!shutdown) {
                shutdown = true;
                if (running) {
                    enqueueMutex.notifyAll();
                } else {
                    // Writer never started, so nothing will count this down.
                    shutdownDone.countDown();
                }
            }
        }
        try {
            shutdownDone.await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
    }

    /**
     * The async processing loop that writes to the data files and does the
     * force calls.
     *
     * Since the file sync() call is the slowest of all the operations, this
     * algorithm tries to 'batch' or group together several file sync() requests
     * into a single file sync() call. The batching is accomplished attaching
     * the same CountDownLatch instance to every force request in a group.
     *
     */
    protected void processQueue() {
        DataFile dataFile = null;
        RandomAccessFile file = null;
        WriteBatch wb = null;
        try {
            DataByteArrayOutputStream buff = new DataByteArrayOutputStream(maxWriteBatchSize);
            while (true) {
                Object o = null;
                // Block till we get a command.
                synchronized (enqueueMutex) {
                    while (true) {
                        if (nextWriteBatch != null) {
                            // Take ownership of the pending batch.
                            o = nextWriteBatch;
                            nextWriteBatch = null;
                            break;
                        }
                        if (shutdown) {
                            return;
                        }
                        enqueueMutex.wait();
                    }
                    // Wake any producer waiting for the batch slot to free up.
                    enqueueMutex.notify();
                }
                wb = (WriteBatch)o;
                if (dataFile != wb.dataFile) {
                    // Batch targets a different data file: swap file handles.
                    if (file != null) {
                        dataFile.closeRandomAccessFile(file);
                    }
                    dataFile = wb.dataFile;
                    file = dataFile.openRandomAccessFile(true);
                }
                WriteCommand write = wb.first;
                // Write all the data.
                // Only need to seek to first location.. all others
                // are in sequence.
                file.seek(write.location.getOffset());
                boolean forceToDisk=false;
                //
                // is it just 1 big write?
                if (wb.size == write.location.getSize()) {
                    forceToDisk = write.sync | write.onComplete!=null;
                    // Just write it directly..
                    file.writeInt(write.location.getSize());
                    file.writeByte(write.location.getType());
                    file.write(RESERVED_SPACE);
                    file.write(AsyncDataManager.ITEM_HEAD_SOR);
                    file.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                    file.write(AsyncDataManager.ITEM_HEAD_EOR);
                } else {
                    // Combine the smaller writes into 1 big buffer
                    while (write != null) {
                        forceToDisk |= write.sync | write.onComplete!=null;
                        buff.writeInt(write.location.getSize());
                        buff.writeByte(write.location.getType());
                        buff.write(RESERVED_SPACE);
                        buff.write(AsyncDataManager.ITEM_HEAD_SOR);
                        buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                        buff.write(AsyncDataManager.ITEM_HEAD_EOR);
                        write = (WriteCommand)write.getNext();
                    }
                    // Now do the 1 big write.
                    ByteSequence sequence = buff.toByteSequence();
                    file.write(sequence.getData(), sequence.getOffset(), sequence.getLength());
                    buff.reset();
                }
                if( forceToDisk ) {
                    file.getFD().sync();
                }
                WriteCommand lastWrite = (WriteCommand)wb.first.getTailNode();
                dataManager.setLastAppendLocation(lastWrite.location);
                // Now that the data is on disk, remove the writes from the in
                // flight
                // cache.
                write = wb.first;
                while (write != null) {
                    if (!write.sync) {
                        inflightWrites.remove(new WriteKey(write.location));
                    }
                    if( write.onComplete !=null ) {
                        try {
                            write.onComplete.run();
                        } catch (Throwable e) {
                            // Callback failures must not kill the writer thread.
                            e.printStackTrace();
                        }
                    }
                    write = (WriteCommand)write.getNext();
                }
                // Signal any waiting threads that the write is on disk.
                wb.latch.countDown();
            }
        } catch (IOException e) {
            // Record the failure, release all blocked waiters with the error.
            synchronized (enqueueMutex) {
                firstAsyncException = e;
                if (wb != null) {
                    wb.latch.countDown();
                    wb.exception.set(e);
                }
                if (nextWriteBatch != null) {
                    nextWriteBatch.latch.countDown();
                    nextWriteBatch.exception.set(e);
                }
            }
        } catch (InterruptedException e) {
            // Interrupted while idle: fall through to shutdown.
        } finally {
            try {
                if (file != null) {
                    dataFile.closeRandomAccessFile(file);
                }
            } catch (Throwable ignore) {
                // Best-effort close during shutdown.
            }
            shutdownDone.countDown();
        }
    }
}

View File

@ -1,159 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.data.RedoListener;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
/**
 * Provides a Kaha DataManager Facade to the AsyncDataManager, converting
 * between Kaha StoreLocations and async-journal Locations.
 */
public final class DataManagerFacade implements org.apache.activemq.kaha.impl.DataManager {

    /** Marker record whose synchronous write forces all prior writes to disk. */
    private static final ByteSequence FORCE_COMMAND = new ByteSequence(new byte[] {'F', 'O', 'R', 'C', 'E'});

    private final AsyncDataManager dataManager;
    private final String name;
    private Marshaller redoMarshaller;

    /** Wraps an async-journal Location so it can travel through the Kaha StoreLocation API. */
    private static class StoreLocationFacade implements StoreLocation {
        private final Location location;

        public StoreLocationFacade(Location location) {
            this.location = location;
        }

        public int getFile() {
            return location.getDataFileId();
        }

        public long getOffset() {
            return location.getOffset();
        }

        public int getSize() {
            return location.getSize();
        }

        public Location getLocation() {
            return location;
        }
    }

    public DataManagerFacade(AsyncDataManager dataManager, String name) {
        this.dataManager = dataManager;
        this.name = name;
    }

    private static StoreLocation convertToStoreLocation(Location location) {
        if (location == null) {
            return null;
        }
        return new StoreLocationFacade(location);
    }

    private static Location convertFromStoreLocation(StoreLocation location) {
        if (location == null) {
            return null;
        }
        // Unwrap our own facade directly; otherwise rebuild a Location from the fields.
        if (location.getClass() == StoreLocationFacade.class) {
            return ((StoreLocationFacade)location).getLocation();
        }
        Location l = new Location();
        l.setOffset((int)location.getOffset());
        l.setSize(location.getSize());
        l.setDataFileId(location.getFile());
        return l;
    }

    /** Reads and unmarshals the item stored at the given location. */
    public Object readItem(Marshaller marshaller, StoreLocation location) throws IOException {
        ByteSequence sequence = dataManager.read(convertFromStoreLocation(location));
        DataByteArrayInputStream dataIn = new DataByteArrayInputStream(sequence);
        return marshaller.readPayload(dataIn);
    }

    /** Marshals and appends the payload; returns where it was stored. */
    public StoreLocation storeDataItem(Marshaller marshaller, Object payload) throws IOException {
        final DataByteArrayOutputStream buffer = new DataByteArrayOutputStream();
        marshaller.writePayload(payload, buffer);
        ByteSequence data = buffer.toByteSequence();
        return convertToStoreLocation(dataManager.write(data, (byte)1, false));
    }

    /** Forces all pending writes to disk by issuing a synchronous marker write. */
    public void force() throws IOException {
        dataManager.write(FORCE_COMMAND, (byte)2, true);
    }

    /** Marshals the payload and overwrites the record at the given location in place. */
    public void updateItem(StoreLocation location, Marshaller marshaller, Object payload) throws IOException {
        final DataByteArrayOutputStream buffer = new DataByteArrayOutputStream();
        marshaller.writePayload(payload, buffer);
        ByteSequence data = buffer.toByteSequence();
        dataManager.update(convertFromStoreLocation(location), data, false);
    }

    public void close() throws IOException {
        dataManager.close();
    }

    public void consolidateDataFiles() throws IOException {
        dataManager.consolidateDataFiles();
    }

    public boolean delete() throws IOException {
        return dataManager.delete();
    }

    public void addInterestInFile(int file) throws IOException {
        dataManager.addInterestInFile(file);
    }

    public void removeInterestInFile(int file) throws IOException {
        dataManager.removeInterestInFile(file);
    }

    /**
     * Not supported by this facade.
     *
     * @throws UnsupportedOperationException always (subclass of RuntimeException,
     *         so existing callers catching RuntimeException still work)
     */
    public void recoverRedoItems(RedoListener listener) throws IOException {
        throw new UnsupportedOperationException("Not Implemented..");
    }

    /**
     * Not supported by this facade.
     *
     * @throws UnsupportedOperationException always
     */
    public StoreLocation storeRedoItem(Object payload) throws IOException {
        throw new UnsupportedOperationException("Not Implemented..");
    }

    public Marshaller getRedoMarshaller() {
        return redoMarshaller;
    }

    public void setRedoMarshaller(Marshaller redoMarshaller) {
        this.redoMarshaller = redoMarshaller;
    }

    public String getName() {
        return name;
    }
}

View File

@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import org.apache.activeio.journal.InvalidRecordLocationException;
import org.apache.activeio.journal.Journal;
import org.apache.activeio.journal.JournalEventListener;
import org.apache.activeio.journal.RecordLocation;
import org.apache.activeio.packet.ByteArrayPacket;
import org.apache.activeio.packet.Packet;
import org.apache.activemq.util.ByteSequence;
/**
 * Adapts an AsyncDataManager to the ActiveIO Journal interface, converting
 * between Journal RecordLocations and async-journal Locations.
 */
public final class JournalFacade implements Journal {

    private final AsyncDataManager dataManager;

    /** Carries an async-journal Location through the RecordLocation API. */
    public static class RecordLocationFacade implements RecordLocation {
        private final Location location;

        public RecordLocationFacade(Location location) {
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public int compareTo(Object o) {
            // Ordering delegates entirely to the wrapped Location.
            return location.compareTo(((RecordLocationFacade)o).location);
        }
    }

    public JournalFacade(AsyncDataManager dataManager) {
        this.dataManager = dataManager;
    }

    private static RecordLocation convertToRecordLocation(Location location) {
        return location == null ? null : new RecordLocationFacade(location);
    }

    private static Location convertFromRecordLocation(RecordLocation location) {
        return location == null ? null : ((RecordLocationFacade)location).getLocation();
    }

    public void close() throws IOException {
        dataManager.close();
    }

    public RecordLocation getMark() throws IllegalStateException {
        return convertToRecordLocation(dataManager.getMark());
    }

    public RecordLocation getNextRecordLocation(RecordLocation location) throws InvalidRecordLocationException, IOException, IllegalStateException {
        Location next = dataManager.getNextLocation(convertFromRecordLocation(location));
        return convertToRecordLocation(next);
    }

    public Packet read(RecordLocation location) throws InvalidRecordLocationException, IOException, IllegalStateException {
        ByteSequence bytes = dataManager.read(convertFromRecordLocation(location));
        return bytes == null ? null : new ByteArrayPacket(bytes.getData(), bytes.getOffset(), bytes.getLength());
    }

    // Event listeners are not supported by this facade; registration is a no-op.
    public void setJournalEventListener(JournalEventListener listener) throws IllegalStateException {
    }

    public void setMark(RecordLocation location, boolean sync) throws InvalidRecordLocationException, IOException, IllegalStateException {
        dataManager.setMark(convertFromRecordLocation(location), sync);
    }

    public RecordLocation write(Packet packet, boolean sync) throws IOException, IllegalStateException {
        org.apache.activeio.packet.ByteSequence raw = packet.asByteSequence();
        ByteSequence payload = new ByteSequence(raw.getData(), raw.getOffset(), raw.getLength());
        return convertToRecordLocation(dataManager.write(payload, sync));
    }

    public RecordLocation write(Packet packet, Runnable onComplete) throws IOException, IllegalStateException {
        org.apache.activeio.packet.ByteSequence raw = packet.asByteSequence();
        ByteSequence payload = new ByteSequence(raw.getData(), raw.getOffset(), raw.getLength());
        return convertToRecordLocation(dataManager.write(payload, onComplete));
    }
}

View File

@ -1,150 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
/**
 * Used as a location in the data store: identifies a record by data file id,
 * byte offset within that file, and record size, plus the record's type byte.
 * Natural ordering is by (dataFileId, offset).
 */
public final class Location implements Comparable<Location> {

    public static final byte MARK_TYPE = -1;
    public static final byte USER_TYPE = 1;
    public static final byte NOT_SET_TYPE = 0;
    public static final int NOT_SET = -1;

    private int dataFileId = NOT_SET;
    private int offset = NOT_SET;
    private int size = NOT_SET;
    private byte type = NOT_SET_TYPE;
    // Latch of the write batch persisting this record; lets callers await the sync.
    private CountDownLatch latch;

    public Location() {
    }

    /** Copy constructor. The latch is intentionally not copied. */
    Location(Location item) {
        this.dataFileId = item.dataFileId;
        this.offset = item.offset;
        this.size = item.size;
        this.type = item.type;
    }

    boolean isValid() {
        return dataFileId != NOT_SET;
    }

    /**
     * @return the size of the data record including the header.
     */
    public int getSize() {
        return size;
    }

    /**
     * @param size the size of the data record including the header.
     */
    public void setSize(int size) {
        this.size = size;
    }

    /**
     * @return the size of the payload of the record (header/footer excluded).
     */
    public int getPaylodSize() {
        // NOTE: the typo in this method name is kept for source compatibility
        // with existing callers.
        return size - AsyncDataManager.ITEM_HEAD_FOOT_SPACE;
    }

    public int getOffset() {
        return offset;
    }

    public void setOffset(int offset) {
        this.offset = offset;
    }

    public int getDataFileId() {
        return dataFileId;
    }

    public void setDataFileId(int file) {
        this.dataFileId = file;
    }

    public byte getType() {
        return type;
    }

    public void setType(byte type) {
        this.type = type;
    }

    public String toString() {
        String result = "offset = " + offset + ", file = " + dataFileId + ", size = " + size + ", type = "
                        + type;
        return result;
    }

    /** Serializes this location (file id, offset, size, type) to the stream. */
    public void writeExternal(DataOutput dos) throws IOException {
        dos.writeInt(dataFileId);
        dos.writeInt(offset);
        dos.writeInt(size);
        dos.writeByte(type);
    }

    /** Restores this location from a stream written by {@link #writeExternal(DataOutput)}. */
    public void readExternal(DataInput dis) throws IOException {
        dataFileId = dis.readInt();
        offset = dis.readInt();
        size = dis.readInt();
        type = dis.readByte();
    }

    public CountDownLatch getLatch() {
        return latch;
    }

    public void setLatch(CountDownLatch latch) {
        this.latch = latch;
    }

    /** Orders by data file id, then by offset within the file. */
    public int compareTo(Location o) {
        if (dataFileId == o.dataFileId) {
            // Compare explicitly instead of subtracting: subtraction overflows
            // for widely separated values (e.g. NOT_SET vs. a huge offset),
            // which would corrupt the ordering.
            if (offset == o.offset) {
                return 0;
            }
            return offset < o.offset ? -1 : 1;
        }
        return dataFileId < o.dataFileId ? -1 : 1;
    }

    public boolean equals(Object o) {
        boolean result = false;
        if (o instanceof Location) {
            result = compareTo((Location)o) == 0;
        }
        return result;
    }

    public int hashCode() {
        return dataFileId ^ offset;
    }
}

View File

@ -1,239 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
* An AsyncDataFileAppender that uses NIO ByteBuffers and File chanels to more
* efficently copy data to files.
*
*
*/
class NIODataFileAppender extends DataFileAppender {
public NIODataFileAppender(AsyncDataManager fileManager) {
super(fileManager);
}
/**
* The async processing loop that writes to the data files and does the
* force calls.
*
* Since the file sync() call is the slowest of all the operations, this
* algorithm tries to 'batch' or group together several file sync() requests
* into a single file sync() call. The batching is accomplished attaching
* the same CountDownLatch instance to every force request in a group.
*
*/
protected void processQueue() {
DataFile dataFile = null;
RandomAccessFile file = null;
FileChannel channel = null;
WriteBatch wb = null;
try {
ByteBuffer header = ByteBuffer.allocateDirect(AsyncDataManager.ITEM_HEAD_SPACE);
ByteBuffer footer = ByteBuffer.allocateDirect(AsyncDataManager.ITEM_FOOT_SPACE);
ByteBuffer buffer = ByteBuffer.allocateDirect(maxWriteBatchSize);
// Populate the static parts of the headers and footers..
header.putInt(0); // size
header.put((byte)0); // type
header.put(RESERVED_SPACE); // reserved
header.put(AsyncDataManager.ITEM_HEAD_SOR);
footer.put(AsyncDataManager.ITEM_HEAD_EOR);
while (true) {
Object o = null;
// Block till we get a command.
synchronized (enqueueMutex) {
while (true) {
if (nextWriteBatch != null) {
o = nextWriteBatch;
nextWriteBatch = null;
break;
}
if (shutdown) {
return;
}
enqueueMutex.wait();
}
enqueueMutex.notify();
}
wb = (WriteBatch)o;
if (dataFile != wb.dataFile) {
if (file != null) {
dataFile.closeRandomAccessFile(file);
}
dataFile = wb.dataFile;
file = dataFile.openRandomAccessFile(true);
channel = file.getChannel();
}
WriteCommand write = wb.first;
// Write all the data.
// Only need to seek to first location.. all others
// are in sequence.
file.seek(write.location.getOffset());
boolean forceToDisk=false;
//
// is it just 1 big write?
if (wb.size == write.location.getSize()) {
forceToDisk = write.sync | write.onComplete!=null;
header.clear();
header.putInt(write.location.getSize());
header.put(write.location.getType());
header.clear();
transfer(header, channel);
ByteBuffer source = ByteBuffer.wrap(write.data.getData(), write.data.getOffset(),
write.data.getLength());
transfer(source, channel);
footer.clear();
transfer(footer, channel);
} else {
// Combine the smaller writes into 1 big buffer
while (write != null) {
forceToDisk |= write.sync | write.onComplete!=null;
header.clear();
header.putInt(write.location.getSize());
header.put(write.location.getType());
header.clear();
copy(header, buffer);
assert !header.hasRemaining();
ByteBuffer source = ByteBuffer.wrap(write.data.getData(), write.data.getOffset(),
write.data.getLength());
copy(source, buffer);
assert !source.hasRemaining();
footer.clear();
copy(footer, buffer);
assert !footer.hasRemaining();
write = (WriteCommand)write.getNext();
}
// Fully write out the buffer..
buffer.flip();
transfer(buffer, channel);
buffer.clear();
}
if( forceToDisk ) {
file.getChannel().force(false);
}
WriteCommand lastWrite = (WriteCommand)wb.first.getTailNode();
dataManager.setLastAppendLocation(lastWrite.location);
// Now that the data is on disk, remove the writes from the in
// flight
// cache.
write = wb.first;
while (write != null) {
if (!write.sync) {
inflightWrites.remove(new WriteKey(write.location));
}
if (write.onComplete != null) {
try {
write.onComplete.run();
} catch (Throwable e) {
e.printStackTrace();
}
}
write = (WriteCommand)write.getNext();
}
// Signal any waiting threads that the write is on disk.
wb.latch.countDown();
}
} catch (IOException e) {
synchronized (enqueueMutex) {
firstAsyncException = e;
if (wb != null) {
wb.latch.countDown();
wb.exception.set(e);
}
if (nextWriteBatch != null) {
nextWriteBatch.latch.countDown();
nextWriteBatch.exception.set(e);
}
}
} catch (InterruptedException e) {
} finally {
try {
if (file != null) {
dataFile.closeRandomAccessFile(file);
dataFile = null;
file.close();
file = null;
}
if (channel != null) {
channel.close();
channel = null;
}
} catch (IOException e) {
}
shutdownDone.countDown();
running = false;
}
}
/**
 * Drains {@code header} into {@code channel}, looping until every remaining
 * byte has been accepted (a single FileChannel.write may be partial).
 *
 * @param header - source of data
 * @param channel - destination where the data will be written.
 * @throws IOException if the underlying channel write fails
 */
private void transfer(ByteBuffer header, FileChannel channel) throws IOException {
    int pending = header.remaining();
    while (pending > 0) {
        channel.write(header);
        pending = header.remaining();
    }
}
/**
 * Copies as many bytes as will fit from {@code src} into {@code dest} without
 * overflowing the destination, temporarily narrowing the source limit so a
 * plain bulk put can be used.
 *
 * @param src  buffer supplying bytes; its position advances by the count copied
 * @param dest buffer receiving bytes
 * @return the number of bytes copied (possibly zero)
 */
private int copy(ByteBuffer src, ByteBuffer dest) {
    final int count = Math.min(dest.remaining(), src.remaining());
    if (count > 0) {
        // Narrow the source window so dest.put() cannot overflow dest.
        final int savedLimit = src.limit();
        src.limit(src.position() + count);
        dest.put(src);
        // Put back the original limit for the caller.
        src.limit(savedLimit);
    }
    return count;
}
}

View File

@ -1,131 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An AsyncDataManager that works in read only mode against multiple data directories.
 * Useful for reading back archived data files.
 *
 * On start() it scans every configured directory for journal files matching the
 * file prefix, registers them in the inherited fileMap/fileByFileMap, links them
 * in ascending file-number order, and leaves currentWriteFile pointing at the
 * last (highest-numbered) file.  All mutating operations are unsupported.
 */
public class ReadOnlyAsyncDataManager extends AsyncDataManager {

    // NOTE(review): this logger is never used in the visible code — confirm
    // whether it is referenced by subclasses before removing.
    private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyAsyncDataManager.class);

    // Directories that are scanned for archived journal data files.
    private final ArrayList<File> dirs;

    public ReadOnlyAsyncDataManager(final ArrayList<File> dirs) {
        this.dirs = dirs;
    }

    /**
     * Scans all configured directories and wires up the discovered data files.
     * Idempotent: returns immediately if already started.
     *
     * @throws IOException if a data file cannot be inspected
     */
    @SuppressWarnings("unchecked")
    public synchronized void start() throws IOException {
        if (started) {
            return;
        }
        started = true;
        accessorPool = new DataFileAccessorPool(this);
        ArrayList<File> files = new ArrayList<File>();
        for (File directory : dirs) {
            final File d = directory;
            // Accept files whose name starts with the journal file prefix.
            // NOTE(review): dir.equals(d) is always true for a direct
            // listFiles() call on d, so only the prefix check filters anything.
            File[] f = d.listFiles(new FilenameFilter() {
                public boolean accept(File dir, String n) {
                    return dir.equals(d) && n.startsWith(filePrefix);
                }
            });
            for (int i = 0; i < f.length; i++) {
                files.add(f[i]);
            }
        }

        // Parse the numeric suffix of each file name to build its DataFile id;
        // files that do not follow the <prefix><number> pattern are skipped.
        for (File file : files) {
            try {
                String n = file.getName();
                String numStr = n.substring(filePrefix.length(), n.length());
                int num = Integer.parseInt(numStr);
                DataFile dataFile = new ReadOnlyDataFile(file, num, preferedFileLength);
                fileMap.put(dataFile.getDataFileId(), dataFile);
                storeSize.addAndGet(dataFile.getLength());
            } catch (NumberFormatException e) {
                // Ignore file that do not match the pattern.
            }
        }

        // Sort the list so that we can link the DataFiles together in the
        // right order.
        List<DataFile> dataFiles = new ArrayList<DataFile>(fileMap.values());
        Collections.sort(dataFiles);
        currentWriteFile = null;
        for (DataFile df : dataFiles) {
            if (currentWriteFile != null) {
                currentWriteFile.linkAfter(df);
            }
            currentWriteFile = df;
            fileByFileMap.put(df.getFile(), df);
        }

        // Need to check the current Write File to see if there was a partial
        // write to it.
        if (currentWriteFile != null) {
            // See if the lastSyncedLocation is valid..
            Location l = lastAppendLocation.get();
            if (l != null && l.getDataFileId() != currentWriteFile.getDataFileId().intValue()) {
                // NOTE(review): the nulled-out 'l' is never written back or
                // used afterwards — this check appears to have no effect;
                // confirm whether lastAppendLocation.set(null) was intended.
                l = null;
            }
        }
    }

    /**
     * Releases the accessor pool and clears the file registries.
     * Idempotent: returns immediately if not started.
     */
    public synchronized void close() throws IOException {
        if (!started) {
            return;
        }
        accessorPool.close();
        fileMap.clear();
        fileByFileMap.clear();
        started = false;
    }

    /**
     * @return the location of the first record across all data files, or null
     *         when no data files were found.
     */
    public Location getFirstLocation() throws IllegalStateException, IOException {
        if( currentWriteFile == null ) {
            return null;
        }

        // Walk back to the head of the linked file list and probe forward from
        // offset 0 of the first file.
        DataFile first = (DataFile)currentWriteFile.getHeadNode();
        Location cur = new Location();
        cur.setDataFileId(first.getDataFileId());
        cur.setOffset(0);
        cur.setSize(0);
        return getNextLocation(cur);
    }

    /** Deletion is never allowed for an archive reader. */
    @Override
    public synchronized boolean delete() throws IOException {
        throw new RuntimeException("Cannot delete a ReadOnlyAsyncDataManager");
    }
}

View File

@ -1,58 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.async;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
/**
 * Allows you to open a data file in read only mode. Useful when working with
 * archived data files.
 *
 * All mutating operations (delete/move) throw; the backing RandomAccessFile is
 * always opened with mode "r".
 */
public class ReadOnlyDataFile extends DataFile {

    ReadOnlyDataFile(File file, int number, int preferedSize) {
        super(file, number, preferedSize);
    }

    /**
     * Opens the underlying file read-only.
     *
     * @param appender true when the caller intends to append
     * @return the opened file handle
     * @throws IOException if the file cannot be opened
     */
    public RandomAccessFile openRandomAccessFile(boolean appender) throws IOException {
        RandomAccessFile rc = new RandomAccessFile(file, "r");

        // When we start to write files size them up so that the OS has a chance
        // to allocate the file contigously.
        // NOTE(review): the handle above is opened with mode "r", so
        // setLength() below would fail with an IOException if appender were
        // ever true — presumably callers never pass true here; confirm.
        if (appender) {
            if (length < preferedSize) {
                rc.setLength(preferedSize);
            }
        }
        return rc;
    }

    public void closeRandomAccessFile(RandomAccessFile file) throws IOException {
        file.close();
    }

    // Archived files must never be deleted through this class.
    public synchronized boolean delete() throws IOException {
        throw new RuntimeException("Not valid on a read only file.");
    }

    // Archived files must never be moved through this class.
    public synchronized void move(File targetDirectory) throws IOException{
        throw new RuntimeException("Not valid on a read only file.");
    }
}

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
Journal-based data storage - scalable and fast
</body>
</html>

View File

@ -1,230 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.activemq.kaha.ContainerId;
import org.apache.activemq.kaha.RuntimeStoreException;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.kaha.impl.data.Item;
import org.apache.activemq.kaha.impl.index.DiskIndexLinkedList;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.apache.activemq.kaha.impl.index.VMIndexLinkedList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class shared by the map and list container implementations.  Holds the
 * persistent index root, the linked list of index entries, and the managers
 * used to read/write index and data records.  All state-changing entry points
 * are synchronized on this instance.
 */
public abstract class BaseContainerImpl {

    private static final Logger LOG = LoggerFactory.getLogger(BaseContainerImpl.class);
    protected IndexItem root;
    protected IndexLinkedList indexList;
    protected IndexManager indexManager;
    protected DataManager dataManager;
    protected ContainerId containerId;
    protected boolean loaded;
    protected boolean closed;
    protected boolean initialized;
    protected boolean persistentIndex;

    protected BaseContainerImpl(ContainerId id, IndexItem root, IndexManager indexManager, DataManager dataManager, boolean persistentIndex) {
        this.containerId = id;
        this.root = root;
        this.indexManager = indexManager;
        this.dataManager = dataManager;
        this.persistentIndex = persistentIndex;
    }

    public ContainerId getContainerId() {
        return containerId;
    }

    /**
     * Lazily creates the index linked list on first use.  The list flavour
     * (disk backed vs. in-memory) is chosen from the persistentIndex flag.
     */
    public synchronized void init() {
        // Fixed: the original contained a redundant doubled
        // "if (!initialized)" check (vestigial double-checked locking) —
        // the method is already synchronized, one check suffices.
        if (!initialized) {
            initialized = true;
            if (this.indexList == null) {
                if (persistentIndex) {
                    this.indexList = new DiskIndexLinkedList(indexManager, root);
                } else {
                    this.indexList = new VMIndexLinkedList(root);
                }
            }
        }
    }

    /** Empties the in-memory index list, if it has been created. */
    public synchronized void clear() {
        if (indexList != null) {
            indexList.clear();
        }
    }

    /**
     * @return the indexList
     */
    public IndexLinkedList getList() {
        return indexList;
    }

    /**
     * @param indexList the indexList to set
     */
    public void setList(IndexLinkedList indexList) {
        this.indexList = indexList;
    }

    public abstract void unload();

    public abstract void load();

    public abstract int size();

    protected abstract Object getValue(StoreEntry currentItem);

    protected abstract void remove(IndexItem currentItem);

    protected final synchronized IndexLinkedList getInternalList() {
        return indexList;
    }

    /** Unloads the container and marks it closed; further access throws. */
    public final synchronized void close() {
        unload();
        closed = true;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.apache.activemq.kaha.ListContainer#isLoaded()
     */
    public final synchronized boolean isLoaded() {
        checkClosed();
        return loaded;
    }

    /*
     * (non-Javadoc)
     *
     * @see org.apache.activemq.kaha.ListContainer#getId()
     */
    public final Object getId() {
        checkClosed();
        return containerId.getKey();
    }

    public DataManager getDataManager() {
        return dataManager;
    }

    public IndexManager getIndexManager() {
        return indexManager;
    }

    /**
     * Walks the on-disk index chain and registers interest in every data file
     * referenced by an index entry, preventing premature cleanup.
     *
     * @throws IOException if an index entry cannot be read
     */
    public final synchronized void expressDataInterest() throws IOException {
        long nextItem = root.getNextItem();
        while (nextItem != Item.POSITION_NOT_SET) {
            IndexItem item = indexManager.getIndex(nextItem);
            item.setOffset(nextItem);
            dataManager.addInterestInFile(item.getKeyFile());
            dataManager.addInterestInFile(item.getValueFile());
            nextItem = item.getNextItem();
        }
    }

    /**
     * Clears the container: unlinks the root, then releases data-file interest
     * and frees every index entry that was chained from it.
     */
    protected final void doClear() {
        checkClosed();
        loaded = true;
        // Renamed from "indexList" — the original local shadowed the field of
        // the same name, which was confusing and error prone.
        List<IndexItem> itemsToFree = new ArrayList<IndexItem>();
        try {
            init();
            long nextItem = root.getNextItem();
            while (nextItem != Item.POSITION_NOT_SET) {
                // NOTE(review): unlike expressDataInterest() this creates a
                // blank IndexItem instead of loading it via
                // indexManager.getIndex(nextItem); getNextItem() on a fresh
                // item may not reflect the on-disk chain — confirm.
                IndexItem item = new IndexItem();
                item.setOffset(nextItem);
                itemsToFree.add(item);
                nextItem = item.getNextItem();
            }
            root.setNextItem(Item.POSITION_NOT_SET);
            storeIndex(root);
            for (int i = 0; i < itemsToFree.size(); i++) {
                IndexItem item = itemsToFree.get(i);
                dataManager.removeInterestInFile(item.getKeyFile());
                dataManager.removeInterestInFile(item.getValueFile());
                indexManager.freeIndex(item);
            }
            itemsToFree.clear();
        } catch (IOException e) {
            LOG.error("Failed to clear Container " + getId(), e);
            throw new RuntimeStoreException(e);
        }
    }

    /**
     * Unlinks keyItem from the doubly linked index chain, patching the
     * previous/next neighbours, and frees its index and data-file interest.
     *
     * @param keyItem  the entry being removed (no-op if null)
     * @param prevItem the entry before it, or null when keyItem is first
     * @param nextItem the entry after it, or null when keyItem is last
     */
    protected final void delete(final IndexItem keyItem, final IndexItem prevItem, final IndexItem nextItem) {
        if (keyItem != null) {
            try {
                root = indexList.getRoot();
                IndexItem prev = prevItem == null ? root : prevItem;
                // Treat a "next" equal to root as end-of-list.
                IndexItem next = (nextItem == null || !nextItem.equals(root)) ? nextItem : null;
                dataManager.removeInterestInFile(keyItem.getKeyFile());
                dataManager.removeInterestInFile(keyItem.getValueFile());
                if (next != null) {
                    prev.setNextItem(next.getOffset());
                    next.setPreviousItem(prev.getOffset());
                    updateIndexes(next);
                } else {
                    prev.setNextItem(Item.POSITION_NOT_SET);
                }
                updateIndexes(prev);
                indexManager.freeIndex(keyItem);
            } catch (IOException e) {
                LOG.error("Failed to delete " + keyItem, e);
                throw new RuntimeStoreException(e);
            }
        }
    }

    /** @throws RuntimeStoreException if close() has already been called */
    protected final void checkClosed() {
        if (closed) {
            throw new RuntimeStoreException("The store is closed");
        }
    }

    protected void storeIndex(IndexItem item) throws IOException {
        indexManager.storeIndex(item);
    }

    protected void updateIndexes(IndexItem item) throws IOException {
        indexManager.updateIndexes(item);
    }

    /** True when item is the root entry (same reference or same offset). */
    protected final boolean isRoot(StoreEntry item) {
        return item != null && root != null && (root == item || root.getOffset() == item.getOffset());
    }
}

View File

@ -1,40 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
/**
 * Base class for container collections: holds the owning map container and
 * delegates the size/emptiness queries to it.
 */
class ContainerCollectionSupport {

    /** The map container every collection view reads through. */
    protected MapContainerImpl container;

    protected ContainerCollectionSupport(MapContainerImpl owner) {
        this.container = owner;
    }

    /** @return the number of entries in the backing container */
    public int size() {
        return this.container.size();
    }

    /** @return true when the backing container has no entries */
    public boolean isEmpty() {
        return this.container.isEmpty();
    }
}

View File

@ -1,110 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/**
 * Set of Map.Entry objects for a container.  Most operations materialise a
 * snapshot of the container's entries via buildEntrySet(); mutations are
 * written through to the backing container.
 */
public class ContainerEntrySet extends ContainerCollectionSupport implements Set {

    ContainerEntrySet(MapContainerImpl container) {
        super(container);
    }

    public boolean contains(Object o) {
        return container.entrySet().contains(o);
    }

    public Iterator iterator() {
        return new ContainerEntrySetIterator(container, buildEntrySet().iterator());
    }

    public Object[] toArray() {
        return buildEntrySet().toArray();
    }

    public Object[] toArray(Object[] a) {
        return buildEntrySet().toArray(a);
    }

    public boolean add(Object o) {
        throw new UnsupportedOperationException("Cannot add here");
    }

    /**
     * Removes the entry from the backing container.
     *
     * @return true if the entry was present and removed
     */
    public boolean remove(Object o) {
        boolean result = false;
        if (buildEntrySet().remove(o)) {
            ContainerMapEntry entry = (ContainerMapEntry)o;
            container.remove(entry.getKey());
            // Fixed: the original never set result, so remove() always
            // returned false even when an entry was removed.
            result = true;
        }
        return result;
    }

    public boolean containsAll(Collection c) {
        return buildEntrySet().containsAll(c);
    }

    public boolean addAll(Collection c) {
        throw new UnsupportedOperationException("Cannot add here");
    }

    /**
     * Retains only the entries also contained in c, per the Set contract.
     *
     * Fixed: the original iterated the ARGUMENT and removed its elements that
     * were not in this set — effectively a no-op that violated
     * {@link java.util.Set#retainAll}.
     *
     * @return true if this set changed
     */
    public boolean retainAll(Collection c) {
        boolean modified = false;
        for (Iterator<ContainerMapEntry> i = buildEntrySet().iterator(); i.hasNext();) {
            ContainerMapEntry entry = i.next();
            if (!c.contains(entry)) {
                modified |= remove(entry);
            }
        }
        return modified;
    }

    public boolean removeAll(Collection c) {
        // NOTE(review): returns true only when EVERY element was removed; the
        // java.util.Set contract says "true if the set changed".  Left as-is
        // for consistency with the sibling collection views — confirm callers.
        boolean result = true;
        for (Iterator i = c.iterator(); i.hasNext();) {
            if (!remove(i.next())) {
                result = false;
            }
        }
        return result;
    }

    public void clear() {
        container.clear();
    }

    /** Snapshots the container's current entries as ContainerMapEntry views. */
    protected Set<ContainerMapEntry> buildEntrySet() {
        Set<ContainerMapEntry> set = new HashSet<ContainerMapEntry>();
        for (Iterator i = container.keySet().iterator(); i.hasNext();) {
            ContainerMapEntry entry = new ContainerMapEntry(container, i.next());
            set.add(entry);
        }
        return set;
    }
}

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.Iterator;
/**
 * An Iterator for a container entry Set.  Wraps a snapshot iterator and writes
 * removals through to the backing container.
 */
public class ContainerEntrySetIterator implements Iterator {

    private final MapContainerImpl container;
    private final Iterator iter;
    private ContainerMapEntry lastReturned;

    ContainerEntrySetIterator(MapContainerImpl container, Iterator iter) {
        this.container = container;
        this.iter = iter;
    }

    public boolean hasNext() {
        return iter.hasNext();
    }

    public Object next() {
        lastReturned = (ContainerMapEntry)iter.next();
        return lastReturned;
    }

    /** Removes the last returned entry from the backing container. */
    public void remove() {
        if (lastReturned == null) {
            return;
        }
        container.remove(lastReturned.getKey());
    }
}

View File

@ -1,124 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.activemq.kaha.impl.index.IndexItem;
/**
 * A Set of keys for the container.  Reads walk the container's internal index
 * linked list; mutations are written through to the backing container.
 */
public class ContainerKeySet extends ContainerCollectionSupport implements Set {

    ContainerKeySet(MapContainerImpl container) {
        super(container);
    }

    public boolean contains(Object o) {
        return container.containsKey(o);
    }

    public Iterator iterator() {
        return new ContainerKeySetIterator(container);
    }

    /** @return a snapshot array of all keys, in index-list order */
    public Object[] toArray() {
        List<Object> list = new ArrayList<Object>();
        IndexItem item = container.getInternalList().getRoot();
        while ((item = container.getInternalList().getNextEntry(item)) != null) {
            list.add(container.getKey(item));
        }
        return list.toArray();
    }

    public Object[] toArray(Object[] a) {
        List<Object> list = new ArrayList<Object>();
        IndexItem item = container.getInternalList().getRoot();
        while ((item = container.getInternalList().getNextEntry(item)) != null) {
            list.add(container.getKey(item));
        }
        return list.toArray(a);
    }

    public boolean add(Object o) {
        throw new UnsupportedOperationException("Cannot add here");
    }

    public boolean remove(Object o) {
        return container.remove(o) != null;
    }

    public boolean containsAll(Collection c) {
        for (Object key : c) {
            if (!container.containsKey(key)) {
                return false;
            }
        }
        return true;
    }

    public boolean addAll(Collection c) {
        throw new UnsupportedOperationException("Cannot add here");
    }

    /**
     * Retains only the keys also contained in c, per the Set contract.
     *
     * Fixed: the original iterated the ARGUMENT and removed its elements that
     * were not in this set — effectively a no-op that violated
     * {@link java.util.Set#retainAll}.
     *
     * @return true if this set changed
     */
    public boolean retainAll(Collection c) {
        boolean modified = false;
        // Iterate a snapshot so write-through removals can't disturb the walk.
        for (Object key : toArray()) {
            if (!c.contains(key)) {
                modified |= remove(key);
            }
        }
        return modified;
    }

    public boolean removeAll(Collection c) {
        // NOTE(review): returns true only when EVERY element was removed; the
        // java.util.Set contract says "true if the set changed".  Left as-is
        // for consistency with the sibling collection views — confirm callers.
        boolean result = true;
        for (Iterator i = c.iterator(); i.hasNext();) {
            if (!remove(i.next())) {
                result = false;
            }
        }
        return result;
    }

    public void clear() {
        container.clear();
    }

    public String toString() {
        // StringBuilder instead of StringBuffer: no synchronization needed for
        // a method-local buffer; output is byte-identical to the original.
        StringBuilder result = new StringBuilder(32);
        result.append("ContainerKeySet[");
        IndexItem item = container.getInternalList().getRoot();
        while ((item = container.getInternalList().getNextEntry(item)) != null) {
            result.append(container.getKey(item));
            result.append(",");
        }
        result.append("]");
        return result.toString();
    }
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.Iterator;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
/**
 * Iterator for the set of keys for a container.  Walks the container's index
 * linked list directly; remove() writes through to the backing container.
 */
public class ContainerKeySetIterator implements Iterator {

    protected IndexItem nextItem;
    protected IndexItem currentItem;
    private MapContainerImpl container;
    private IndexLinkedList list;

    ContainerKeySetIterator(MapContainerImpl container) {
        this.container = container;
        this.list = container.getInternalList();
        this.currentItem = list.getRoot();
        this.nextItem = list.getNextEntry(currentItem);
    }

    public boolean hasNext() {
        return nextItem != null;
    }

    public Object next() {
        // Advance first, then resolve the key and pre-fetch the following entry.
        currentItem = nextItem;
        Object key = container.getKey(currentItem);
        nextItem = list.getNextEntry(currentItem);
        return key;
    }

    /** Removes the last returned key and refreshes the cached next entry. */
    public void remove() {
        if (currentItem == null) {
            return;
        }
        container.remove(currentItem);
        if (nextItem != null) {
            list.refreshEntry(nextItem);
        }
    }
}

View File

@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.ListIterator;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
/**
 * ListIterator over a ListContainerImpl.  Extends the plain value-collection
 * iterator with bidirectional movement, index queries and set/add support.
 *
 * All navigation re-resolves nextItem via list.refreshEntry() under the
 * container lock before use, since the underlying disk index entry may have
 * been relocated since the last call.
 */
public class ContainerListIterator extends ContainerValueCollectionIterator implements ListIterator {

    protected ContainerListIterator(ListContainerImpl container, IndexLinkedList list, IndexItem start) {
        super(container, list, start);
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#hasPrevious()
     */
    public boolean hasPrevious() {
        synchronized (container) {
            // Refresh before probing: the entry may be stale.
            nextItem = (IndexItem)list.refreshEntry(nextItem);
            return list.getPrevEntry(nextItem) != null;
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#previous()
     */
    public Object previous() {
        synchronized (container) {
            nextItem = (IndexItem)list.refreshEntry(nextItem);
            nextItem = list.getPrevEntry(nextItem);
            // NOTE(review): returns null (rather than throwing
            // NoSuchElementException) when there is no previous element.
            return nextItem != null ? container.getValue(nextItem) : null;
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#nextIndex()
     */
    public int nextIndex() {
        // Returns -1 when there is no next element (contract deviation from
        // java.util.ListIterator, which specifies the list size).
        int result = -1;
        if (nextItem != null) {
            synchronized (container) {
                nextItem = (IndexItem)list.refreshEntry(nextItem);
                StoreEntry next = list.getNextEntry(nextItem);
                if (next != null) {
                    result = container.getInternalList().indexOf(next);
                }
            }
        }
        return result;
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#previousIndex()
     */
    public int previousIndex() {
        // Returns -1 when there is no previous element.
        int result = -1;
        if (nextItem != null) {
            synchronized (container) {
                nextItem = (IndexItem)list.refreshEntry(nextItem);
                StoreEntry prev = list.getPrevEntry(nextItem);
                if (prev != null) {
                    result = container.getInternalList().indexOf(prev);
                }
            }
        }
        return result;
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#set(E)
     */
    public void set(Object o) {
        // Replace the element at the cursor position and re-anchor the
        // iterator on the freshly written index entry.
        IndexItem item = ((ListContainerImpl)container).internalSet(previousIndex() + 1, o);
        nextItem = item;
    }

    /*
     * (non-Javadoc)
     *
     * @see java.util.ListIterator#add(E)
     */
    public void add(Object o) {
        // Insert at the cursor position and re-anchor on the new entry.
        IndexItem item = ((ListContainerImpl)container).internalAdd(previousIndex() + 1, o);
        nextItem = item;
    }
}

View File

@ -1,49 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.Map;
import org.apache.activemq.kaha.MapContainer;
/**
 * Map.Entry implementation for a container.  The value is never cached: both
 * getValue() and setValue() read/write straight through to the container.
 */
class ContainerMapEntry implements Map.Entry {

    private final MapContainer container;
    private final Object key;

    ContainerMapEntry(MapContainer container, Object key) {
        this.container = container;
        this.key = key;
    }

    public Object getKey() {
        return key;
    }

    /** @return the current value for this key, fetched from the container */
    public Object getValue() {
        return container.get(key);
    }

    /** Writes through to the container and returns the previous value. */
    public Object setValue(Object value) {
        return container.put(key, value);
    }
}

View File

@ -1,131 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
/**
 * Values collection for the MapContainer.  Reads walk the container's item
 * list (synchronized on the list); mutations write through to the container.
 */
class ContainerValueCollection extends ContainerCollectionSupport implements Collection {

    ContainerValueCollection(MapContainerImpl container) {
        super(container);
    }

    public boolean contains(Object o) {
        return container.containsValue(o);
    }

    public Iterator iterator() {
        IndexLinkedList list = container.getItemList();
        return new ContainerValueCollectionIterator(container, list, list.getRoot());
    }

    /** @return a snapshot array of all values, in item-list order */
    public Object[] toArray() {
        Object[] result = null;
        IndexLinkedList list = container.getItemList();
        synchronized (list) {
            result = new Object[list.size()];
            IndexItem item = list.getFirst();
            int count = 0;
            while (item != null) {
                Object value = container.getValue(item);
                result[count++] = value;
                item = list.getNextEntry(item);
            }
        }
        return result;
    }

    public Object[] toArray(Object[] result) {
        IndexLinkedList list = container.getItemList();
        synchronized (list) {
            // Only fills the supplied array when it is large enough; otherwise
            // it is returned untouched (contract deviation from
            // Collection.toArray, which allocates a bigger array).
            if (result.length <= list.size()) {
                IndexItem item = list.getFirst();
                int count = 0;
                while (item != null) {
                    Object value = container.getValue(item);
                    result[count++] = value;
                    item = list.getNextEntry(item);
                }
            }
        }
        return result;
    }

    public boolean add(Object o) {
        throw new UnsupportedOperationException("Can't add an object here");
    }

    public boolean remove(Object o) {
        return container.removeValue(o);
    }

    /**
     * Fixed: the original initialised the result to {@code !c.isEmpty()}, so
     * containsAll(emptyCollection) returned false — the Collection contract
     * requires true (every element of an empty collection is trivially
     * contained).
     */
    public boolean containsAll(Collection c) {
        for (Iterator i = c.iterator(); i.hasNext();) {
            if (!contains(i.next())) {
                return false;
            }
        }
        return true;
    }

    public boolean addAll(Collection c) {
        throw new UnsupportedOperationException("Can't add everything here!");
    }

    public boolean removeAll(Collection c) {
        // NOTE(review): returns true only when EVERY element was removed; the
        // Collection contract says "true if the collection changed".  Left
        // as-is for consistency with the sibling views — confirm callers.
        boolean result = true;
        for (Iterator i = c.iterator(); i.hasNext();) {
            Object obj = i.next();
            result &= remove(obj);
        }
        return result;
    }

    /**
     * Retains only the values also contained in c, per the Collection
     * contract.
     *
     * Fixed: the original iterated the ARGUMENT and removed its elements that
     * were not in this collection — effectively a no-op that violated
     * {@link java.util.Collection#retainAll}.
     *
     * @return true if this collection changed
     */
    public boolean retainAll(Collection c) {
        boolean modified = false;
        // Iterate a snapshot so write-through removals can't disturb the walk.
        for (Object value : toArray()) {
            if (!c.contains(value)) {
                modified |= remove(value);
            }
        }
        return modified;
    }

    public void clear() {
        container.clear();
    }
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.util.Iterator;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
/**
 * Values collection iterator for the MapContainer.
 *
 * Walks the container's IndexLinkedList, resolving each entry back to its
 * value via the container. next() and remove() synchronize on the container
 * so the underlying index list is not mutated concurrently during access.
 */
public class ContainerValueCollectionIterator implements Iterator {
    // Owning container; supplies values and serves as the traversal lock.
    protected BaseContainerImpl container;
    // The index list being iterated.
    protected IndexLinkedList list;
    // Entry whose value the next call to next() returns; null when exhausted.
    protected IndexItem nextItem;
    // Entry most recently returned by next() (the start entry before the
    // first call), used by remove().
    protected IndexItem currentItem;
    ContainerValueCollectionIterator(BaseContainerImpl container, IndexLinkedList list, IndexItem start) {
        this.container = container;
        this.list = list;
        this.currentItem = start;
        // refreshEntry re-resolves the entry so a possibly-stale reference
        // still points at the live list position — TODO confirm against
        // IndexLinkedList.refreshEntry.
        this.nextItem = list.getNextEntry((IndexItem)list.refreshEntry(start));
    }
    public boolean hasNext() {
        return nextItem != null;
    }
    // NOTE(review): calling next() past the end throws NullPointerException
    // (via refreshEntry/getValue on null) rather than the
    // NoSuchElementException the Iterator contract specifies.
    public Object next() {
        synchronized (container) {
            nextItem = (IndexItem)list.refreshEntry(nextItem);
            currentItem = nextItem;
            Object result = container.getValue(nextItem);
            nextItem = list.getNextEntry(nextItem);
            return result;
        }
    }
    // Removes from the container the entry last returned by next(); a no-op
    // when no entry has been returned and currentItem is null.
    public void remove() {
        synchronized (container) {
            if (currentItem != null) {
                currentItem = (IndexItem)list.refreshEntry(currentItem);
                container.remove(currentItem);
            }
        }
    }
}

View File

@ -1,892 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import org.apache.activemq.kaha.ContainerId;
import org.apache.activemq.kaha.ListContainer;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.RuntimeStoreException;
import org.apache.activemq.kaha.Store;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.kaha.impl.data.Item;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of a ListContainer.
 *
 * A persistent java.util.List-like container: element values are written
 * through a DataManager and chained together by IndexItem entries (offset
 * based prev/next links) held by an IndexManager. An in-memory
 * IndexLinkedList mirrors the on-disk chain. All public operations are
 * synchronized on the container and lazily {@link #load} the index list on
 * first use.
 */
public class ListContainerImpl extends BaseContainerImpl implements ListContainer {
    private static final Logger LOG = LoggerFactory.getLogger(ListContainerImpl.class);
    // Marshaller used to read/write element values; defaults to the generic
    // object marshaller and is replaceable via setMarshaller().
    protected Marshaller marshaller = Store.OBJECT_MARSHALLER;
    public ListContainerImpl(ContainerId id, IndexItem root, IndexManager indexManager,
                             DataManager dataManager, boolean persistentIndex) throws IOException {
        super(id, root, indexManager, dataManager, persistentIndex);
    }
    /**
     * Loads the container on first use: rebuilds the in-memory index list by
     * following the persisted next-item offsets from the root.
     */
    public synchronized void load() {
        checkClosed();
        if (!loaded) {
            // NOTE(review): redundant inner check — the method is
            // synchronized, so the outer test already suffices.
            if (!loaded) {
                loaded = true;
                try {
                    init();
                    long nextItem = root.getNextItem();
                    while (nextItem != Item.POSITION_NOT_SET) {
                        IndexItem item = indexManager.getIndex(nextItem);
                        indexList.add(item);
                        itemAdded(item, indexList.size() - 1, getValue(item));
                        nextItem = item.getNextItem();
                    }
                } catch (IOException e) {
                    LOG.error("Failed to load container " + getId(), e);
                    throw new RuntimeStoreException(e);
                }
            }
        }
    }
    /** Discards the in-memory index list; persisted data is untouched. */
    public synchronized void unload() {
        checkClosed();
        if (loaded) {
            loaded = false;
            indexList.clear();
        }
    }
    /** Sets the marshaller used for element values. */
    public synchronized void setMarshaller(Marshaller marshaller) {
        checkClosed();
        this.marshaller = marshaller;
    }
    /**
     * Element-wise equality against any List of the same size.
     * NOTE(review): hashCode() below is identity-based (super.hashCode), so
     * equal containers do not share a hash code — inconsistent with the
     * equals/hashCode contract.
     */
    public synchronized boolean equals(Object obj) {
        load();
        boolean result = false;
        if (obj != null && obj instanceof List) {
            List other = (List)obj;
            result = other.size() == size();
            if (result) {
                for (int i = 0; i < indexList.size(); i++) {
                    Object o1 = other.get(i);
                    Object o2 = get(i);
                    result = o1 == o2 || (o1 != null && o2 != null && o1.equals(o2));
                    if (!result) {
                        break;
                    }
                }
            }
        }
        return result;
    }
    public int hashCode() {
        return super.hashCode();
    }
    /** @return the number of elements in the container */
    public synchronized int size() {
        load();
        return indexList.size();
    }
    /** Inserts {@code o} at the head of the list. */
    public synchronized void addFirst(Object o) {
        internalAddFirst(o);
    }
    /** Appends {@code o} to the tail of the list. */
    public synchronized void addLast(Object o) {
        internalAddLast(o);
    }
    /**
     * Removes and returns the first element, or null if the list is empty.
     * delete() relinks the root to the (former) second element.
     */
    public synchronized Object removeFirst() {
        load();
        Object result = null;
        IndexItem item = indexList.getFirst();
        if (item != null) {
            itemRemoved(0);
            result = getValue(item);
            IndexItem prev = root;
            IndexItem next = indexList.size() > 1 ? (IndexItem)indexList.get(1) : null;
            indexList.removeFirst();
            delete(item, prev, next);
            item = null;
        }
        return result;
    }
    /** Removes and returns the last element, or null if the list is empty. */
    public synchronized Object removeLast() {
        load();
        Object result = null;
        IndexItem last = indexList.getLast();
        if (last != null) {
            itemRemoved(indexList.size() - 1);
            result = getValue(last);
            IndexItem prev = indexList.getPrevEntry(last);
            IndexItem next = null;
            indexList.removeLast();
            delete(last, prev, next);
        }
        return result;
    }
    /** @return true if the container holds no elements */
    public synchronized boolean isEmpty() {
        load();
        return indexList.isEmpty();
    }
    /** Linear scan for a value equal to {@code o}. */
    public synchronized boolean contains(Object o) {
        load();
        boolean result = false;
        if (o != null) {
            IndexItem next = indexList.getFirst();
            while (next != null) {
                Object value = getValue(next);
                if (value != null && value.equals(o)) {
                    result = true;
                    break;
                }
                next = indexList.getNextEntry(next);
            }
        }
        return result;
    }
    /** @return an iterator over the elements (backed by listIterator()) */
    public synchronized Iterator iterator() {
        load();
        return listIterator();
    }
    /** @return a new array holding every element, in list order */
    public synchronized Object[] toArray() {
        load();
        List<Object> tmp = new ArrayList<Object>(indexList.size());
        IndexItem next = indexList.getFirst();
        while (next != null) {
            Object value = getValue(next);
            tmp.add(value);
            next = indexList.getNextEntry(next);
        }
        return tmp.toArray();
    }
    /** Copies the elements into {@code a} (or a new array) per List.toArray(T[]). */
    public synchronized Object[] toArray(Object[] a) {
        load();
        List<Object> tmp = new ArrayList<Object>(indexList.size());
        IndexItem next = indexList.getFirst();
        while (next != null) {
            Object value = getValue(next);
            tmp.add(value);
            next = indexList.getNextEntry(next);
        }
        return tmp.toArray(a);
    }
    /** Appends {@code o}; always returns true per the List contract. */
    public synchronized boolean add(Object o) {
        load();
        addLast(o);
        return true;
    }
    /** Removes the first element equal to {@code o}, if any. */
    public synchronized boolean remove(Object o) {
        load();
        boolean result = false;
        int pos = 0;
        IndexItem next = indexList.getFirst();
        while (next != null) {
            Object value = getValue(next);
            if (value != null && value.equals(o)) {
                remove(next);
                itemRemoved(pos);
                result = true;
                break;
            }
            next = indexList.getNextEntry(next);
            pos++;
        }
        return result;
    }
    // Unlinks a known entry from both the in-memory list and the persisted
    // chain (delete relinks prev/next around it).
    protected synchronized void remove(IndexItem item) {
        IndexItem prev = indexList.getPrevEntry(item);
        IndexItem next = indexList.getNextEntry(item);
        indexList.remove(item);
        delete(item, prev, next);
    }
    /** @return true if every element of {@code c} is contained */
    public synchronized boolean containsAll(Collection c) {
        load();
        for (Iterator i = c.iterator(); i.hasNext();) {
            Object obj = i.next();
            if (!contains(obj)) {
                return false;
            }
        }
        return true;
    }
    /** Appends every element of {@code c}; always returns true. */
    public synchronized boolean addAll(Collection c) {
        load();
        for (Iterator i = c.iterator(); i.hasNext();) {
            add(i.next());
        }
        return true;
    }
    /** Inserts every element of {@code c} starting at {@code index}. */
    public synchronized boolean addAll(int index, Collection c) {
        load();
        boolean result = false;
        ListIterator e1 = listIterator(index);
        Iterator e2 = c.iterator();
        while (e2.hasNext()) {
            e1.add(e2.next());
            result = true;
        }
        return result;
    }
    /**
     * Removes every element that occurs in {@code c}.
     * NOTE(review): returns true only if ALL removals succeeded (and true
     * for an empty {@code c}) — the List contract says "true if the list
     * changed".
     */
    public synchronized boolean removeAll(Collection c) {
        load();
        boolean result = true;
        for (Iterator i = c.iterator(); i.hasNext();) {
            Object obj = i.next();
            result &= remove(obj);
        }
        return result;
    }
    /**
     * Retains only elements contained in {@code c}.
     * @return true if any element was removed
     */
    public synchronized boolean retainAll(Collection c) {
        load();
        List<Object> tmpList = new ArrayList<Object>();
        IndexItem next = indexList.getFirst();
        while (next != null) {
            Object o = getValue(next);
            if (!c.contains(o)) {
                tmpList.add(o);
            }
            next = indexList.getNextEntry(next);
        }
        for (Iterator<Object> i = tmpList.iterator(); i.hasNext();) {
            remove(i.next());
        }
        return !tmpList.isEmpty();
    }
    /** Removes all elements from the container and its backing store. */
    public synchronized void clear() {
        checkClosed();
        super.clear();
        doClear();
    }
    /** @return the element at {@code index}, or null if the entry is missing */
    public synchronized Object get(int index) {
        load();
        Object result = null;
        IndexItem item = indexList.get(index);
        if (item != null) {
            result = getValue(item);
        }
        return result;
    }
    /**
     * Replaces the element at {@code index}: the old entry is deleted
     * (relinking its neighbours) and the new element inserted in its place.
     * @return the previous element at that position
     */
    public synchronized Object set(int index, Object element) {
        load();
        Object result = null;
        IndexItem replace = indexList.isEmpty() ? null : (IndexItem)indexList.get(index);
        IndexItem prev = (indexList.isEmpty() || (index - 1) < 0) ? null : (IndexItem)indexList
            .get(index - 1);
        IndexItem next = (indexList.isEmpty() || (index + 1) >= size()) ? null : (IndexItem)indexList
            .get(index + 1);
        result = getValue(replace);
        indexList.remove(index);
        delete(replace, prev, next);
        itemRemoved(index);
        add(index, element);
        return result;
    }
    // Like set(int, Object) but returns the new IndexItem instead of the
    // old value, for subclasses that track entries.
    protected synchronized IndexItem internalSet(int index, Object element) {
        IndexItem replace = indexList.isEmpty() ? null : (IndexItem)indexList.get(index);
        IndexItem prev = (indexList.isEmpty() || (index - 1) < 0) ? null : (IndexItem)indexList
            .get(index - 1);
        IndexItem next = (indexList.isEmpty() || (index + 1) >= size()) ? null : (IndexItem)indexList
            .get(index + 1);
        indexList.remove(index);
        delete(replace, prev, next);
        itemRemoved(index);
        return internalAdd(index, element);
    }
    /** Inserts {@code element} at {@code index}. */
    public synchronized void add(int index, Object element) {
        load();
        IndexItem item = insert(index, element);
        indexList.add(index, item);
        itemAdded(item, index, element);
    }
    // Appends and returns the new entry (tail insert).
    protected synchronized StoreEntry internalAddLast(Object o) {
        load();
        IndexItem item = writeLast(o);
        indexList.addLast(item);
        itemAdded(item, indexList.size() - 1, o);
        return item;
    }
    // Prepends and returns the new entry (head insert).
    protected synchronized StoreEntry internalAddFirst(Object o) {
        load();
        IndexItem item = writeFirst(o);
        indexList.addFirst(item);
        itemAdded(item, 0, o);
        return item;
    }
    // Positional insert returning the new entry.
    protected synchronized IndexItem internalAdd(int index, Object element) {
        load();
        IndexItem item = insert(index, element);
        indexList.add(index, item);
        itemAdded(item, index, element);
        return item;
    }
    // Bounds-checked positional lookup of the raw entry; null out of range.
    protected synchronized StoreEntry internalGet(int index) {
        load();
        if (index >= 0 && index < indexList.size()) {
            return indexList.get(index);
        }
        return null;
    }
    /**
     * Removes the element at {@code index} without materializing its value.
     * @return true if an entry existed at that index
     */
    public synchronized boolean doRemove(int index) {
        load();
        boolean result = false;
        IndexItem item = indexList.get(index);
        if (item != null) {
            result = true;
            IndexItem prev = indexList.getPrevEntry(item);
            prev = prev != null ? prev : root;
            IndexItem next = indexList.getNextEntry(prev);
            indexList.remove(index);
            itemRemoved(index);
            delete(item, prev, next);
        }
        return result;
    }
    /** Removes and returns the element at {@code index} (null if no entry). */
    public synchronized Object remove(int index) {
        load();
        Object result = null;
        IndexItem item = indexList.get(index);
        if (item != null) {
            itemRemoved(index);
            result = getValue(item);
            IndexItem prev = indexList.getPrevEntry(item);
            prev = prev != null ? prev : root;
            IndexItem next = indexList.getNextEntry(item);
            indexList.remove(index);
            delete(item, prev, next);
        }
        return result;
    }
    /** @return index of the first element equal to {@code o}, or -1 */
    public synchronized int indexOf(Object o) {
        load();
        int result = -1;
        if (o != null) {
            int count = 0;
            IndexItem next = indexList.getFirst();
            while (next != null) {
                Object value = getValue(next);
                if (value != null && value.equals(o)) {
                    result = count;
                    break;
                }
                count++;
                next = indexList.getNextEntry(next);
            }
        }
        return result;
    }
    /** @return index of the last element equal to {@code o}, or -1 */
    public synchronized int lastIndexOf(Object o) {
        load();
        int result = -1;
        if (o != null) {
            int count = indexList.size() - 1;
            IndexItem next = indexList.getLast();
            while (next != null) {
                Object value = getValue(next);
                if (value != null && value.equals(o)) {
                    result = count;
                    break;
                }
                count--;
                next = indexList.getPrevEntry(next);
            }
        }
        return result;
    }
    /** @return a ListIterator positioned before the first element */
    public synchronized ListIterator listIterator() {
        load();
        return new ContainerListIterator(this, indexList, indexList.getRoot());
    }
    /** @return a ListIterator positioned before {@code index} */
    public synchronized ListIterator listIterator(int index) {
        load();
        IndexItem start = (index - 1) > 0 ? indexList.get(index - 1) : indexList.getRoot();
        return new ContainerListIterator(this, indexList, start);
    }
    /**
     * Materializes a detached sub-list copy of [fromIndex, toIndex) —
     * unlike List.subList, changes to the result are NOT reflected back.
     */
    public synchronized List<Object> subList(int fromIndex, int toIndex) {
        load();
        List<Object> result = new ArrayList<Object>();
        int count = fromIndex;
        IndexItem next = indexList.get(fromIndex);
        while (next != null && count++ < toIndex) {
            result.add(getValue(next));
            next = indexList.getNextEntry(next);
        }
        return result;
    }
    /**
     * add an Object to the list but get a StoreEntry of its position
     *
     * @param object
     * @return the entry in the Store
     */
    public synchronized StoreEntry placeLast(Object object) {
        StoreEntry item = internalAddLast(object);
        return item;
    }
    /**
     * insert an Object in first position int the list but get a StoreEntry of
     * its position
     *
     * @param object
     * @return the location in the Store
     */
    public synchronized StoreEntry placeFirst(Object object) {
        StoreEntry item = internalAddFirst(object);
        return item;
    }
    /**
     * Rewrites the value stored at an existing entry in place.
     *
     * @param entry
     * @param object
     * @see org.apache.activemq.kaha.ListContainer#update(org.apache.activemq.kaha.StoreEntry,
     *      java.lang.Object)
     */
    public synchronized void update(StoreEntry entry, Object object) {
        try {
            dataManager.updateItem(entry.getValueDataItem(), marshaller, object);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Retrieve an Object from the Store by its location; the entry is
     * refreshed first in case the caller's reference is stale.
     *
     * @param entry
     * @return the Object at that entry
     */
    public synchronized Object get(final StoreEntry entry) {
        load();
        StoreEntry entryToUse = refresh(entry);
        return getValue(entryToUse);
    }
    /**
     * remove the Object at the StoreEntry
     *
     * @param entry
     * @return true if successful
     */
    public synchronized boolean remove(StoreEntry entry) {
        IndexItem item = (IndexItem)entry;
        load();
        boolean result = false;
        if (item != null) {
            remove(item);
            result = true;
        }
        return result;
    }
    /**
     * Get the StoreEntry for the first item of the list
     *
     * @return the first StoreEntry or null if the list is empty
     */
    public synchronized StoreEntry getFirst() {
        load();
        return indexList.getFirst();
    }
    /**
     * Get the StoreEntry for the last item of the list
     *
     * @return the last StoreEntry or null if the list is empty
     */
    public synchronized StoreEntry getLast() {
        load();
        return indexList.getLast();
    }
    /**
     * Get the next StoreEntry from the list
     *
     * @param entry
     * @return the next StoreEntry or null
     */
    public synchronized StoreEntry getNext(StoreEntry entry) {
        load();
        IndexItem item = (IndexItem)entry;
        return indexList.getNextEntry(item);
    }
    /**
     * Get the previous StoreEntry from the list
     *
     * @param entry
     * @return the previous store entry or null
     */
    public synchronized StoreEntry getPrevious(StoreEntry entry) {
        load();
        IndexItem item = (IndexItem)entry;
        return indexList.getPrevEntry(item);
    }
    /**
     * It's possible that a StoreEntry could become stale; this returns an
     * up-to-date entry for the StoreEntry position
     *
     * @param entry old entry
     * @return a refreshed StoreEntry
     */
    public synchronized StoreEntry refresh(StoreEntry entry) {
        load();
        return indexList.getEntry(entry);
    }
    // Persists a value and links a new index entry after the current tail
    // (or the root when the list is empty), updating neighbour offsets.
    protected synchronized IndexItem writeLast(Object value) {
        IndexItem index = null;
        try {
            if (value != null) {
                StoreLocation data = dataManager.storeDataItem(marshaller, value);
                index = indexManager.createNewIndex();
                index.setValueData(data);
                IndexItem prev = indexList.getLast();
                prev = prev != null ? prev : root;
                IndexItem next = indexList.getNextEntry(prev);
                prev.setNextItem(index.getOffset());
                index.setPreviousItem(prev.getOffset());
                updateIndexes(prev);
                if (next != null) {
                    next.setPreviousItem(index.getOffset());
                    index.setNextItem(next.getOffset());
                    updateIndexes(next);
                }
                storeIndex(index);
            }
        } catch (IOException e) {
            LOG.error("Failed to write " + value, e);
            throw new RuntimeStoreException(e);
        }
        return index;
    }
    // Persists a value and links a new index entry directly after the root.
    protected synchronized IndexItem writeFirst(Object value) {
        IndexItem index = null;
        try {
            if (value != null) {
                StoreLocation data = dataManager.storeDataItem(marshaller, value);
                index = indexManager.createNewIndex();
                index.setValueData(data);
                IndexItem prev = root;
                IndexItem next = indexList.getNextEntry(prev);
                prev.setNextItem(index.getOffset());
                index.setPreviousItem(prev.getOffset());
                updateIndexes(prev);
                if (next != null) {
                    next.setPreviousItem(index.getOffset());
                    index.setNextItem(next.getOffset());
                    updateIndexes(next);
                }
                storeIndex(index);
            }
        } catch (IOException e) {
            LOG.error("Failed to write " + value, e);
            throw new RuntimeStoreException(e);
        }
        return index;
    }
    // Persists a value and splices a new index entry into the chain at
    // insertPos (head / tail / mid-list each choose prev and next
    // differently), then rewrites the affected neighbour offsets.
    protected synchronized IndexItem insert(int insertPos, Object value) {
        IndexItem index = null;
        try {
            if (value != null) {
                StoreLocation data = dataManager.storeDataItem(marshaller, value);
                index = indexManager.createNewIndex();
                index.setValueData(data);
                IndexItem prev = null;
                IndexItem next = null;
                if (insertPos <= 0) {
                    prev = root;
                    next = indexList.getNextEntry(root);
                } else if (insertPos >= indexList.size()) {
                    prev = indexList.getLast();
                    if (prev==null) {
                        prev=root;
                    }
                    next = null;
                } else {
                    prev = indexList.get(insertPos);
                    prev = prev != null ? prev : root;
                    next = indexList.getNextEntry(prev);
                }
                prev.setNextItem(index.getOffset());
                index.setPreviousItem(prev.getOffset());
                updateIndexes(prev);
                if (next != null) {
                    next.setPreviousItem(index.getOffset());
                    index.setNextItem(next.getOffset());
                    updateIndexes(next);
                }
                storeIndex(index);
                indexList.setRoot(root);
            }
        } catch (IOException e) {
            LOG.error("Failed to insert " + value, e);
            throw new RuntimeStoreException(e);
        }
        return index;
    }
    // Reads the element value referenced by an entry via the marshaller;
    // null entry yields null.
    protected synchronized Object getValue(StoreEntry item) {
        Object result = null;
        if (item != null) {
            try {
                StoreLocation data = item.getValueDataItem();
                result = dataManager.readItem(marshaller, data);
            } catch (IOException e) {
                LOG.error("Failed to get value for " + item, e);
                throw new RuntimeStoreException(e);
            }
        }
        return result;
    }
    /**
     * @return a string representation of this collection.
     */
    public synchronized String toString() {
        StringBuffer result = new StringBuffer();
        result.append("[");
        Iterator i = iterator();
        boolean hasNext = i.hasNext();
        while (hasNext) {
            Object o = i.next();
            result.append(String.valueOf(o));
            hasNext = i.hasNext();
            if (hasNext) {
                result.append(", ");
            }
        }
        result.append("]");
        return result.toString();
    }
    // Subclass hook invoked after an element is added; no-op here.
    protected synchronized void itemAdded(IndexItem item, int pos, Object value) {
    }
    // Subclass hook invoked after an element is removed; no-op here.
    protected synchronized void itemRemoved(int pos) {
    }
}

View File

@ -1,620 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.container;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.apache.activemq.kaha.ContainerId;
import org.apache.activemq.kaha.IndexMBean;
import org.apache.activemq.kaha.MapContainer;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.RuntimeStoreException;
import org.apache.activemq.kaha.Store;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.kaha.impl.data.Item;
import org.apache.activemq.kaha.impl.index.Index;
import org.apache.activemq.kaha.impl.index.IndexItem;
import org.apache.activemq.kaha.impl.index.IndexLinkedList;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.apache.activemq.kaha.impl.index.VMIndex;
import org.apache.activemq.kaha.impl.index.hash.HashIndex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implementation of a MapContainer
*
*
*/
public final class MapContainerImpl extends BaseContainerImpl implements MapContainer {
private static final Logger LOG = LoggerFactory.getLogger(MapContainerImpl.class);
protected Index index;
protected Marshaller keyMarshaller = Store.OBJECT_MARSHALLER;
protected Marshaller valueMarshaller = Store.OBJECT_MARSHALLER;
protected File directory;
private int indexBinSize = HashIndex.DEFAULT_BIN_SIZE;
private int indexKeySize = HashIndex.DEFAULT_KEY_SIZE;
private int indexPageSize = HashIndex.DEFAULT_PAGE_SIZE;
private int indexMaxBinSize = HashIndex.MAXIMUM_CAPACITY;
private int indexLoadFactor = HashIndex.DEFAULT_LOAD_FACTOR;
public MapContainerImpl(File directory, ContainerId id, IndexItem root, IndexManager indexManager,
DataManager dataManager, boolean persistentIndex) {
super(id, root, indexManager, dataManager, persistentIndex);
this.directory = directory;
}
public synchronized void init() {
super.init();
if (index == null) {
if (persistentIndex) {
String name = containerId.getDataContainerName() + "_" + containerId.getKey();
try {
HashIndex hashIndex = new HashIndex(directory, name, indexManager);
hashIndex.setNumberOfBins(getIndexBinSize());
hashIndex.setKeySize(getIndexKeySize());
hashIndex.setPageSize(getIndexPageSize());
hashIndex.setMaximumCapacity(getIndexMaxBinSize());
hashIndex.setLoadFactor(getIndexLoadFactor());
this.index = hashIndex;
} catch (IOException e) {
LOG.error("Failed to create HashIndex", e);
throw new RuntimeException(e);
}
} else {
this.index = new VMIndex(indexManager);
}
}
index.setKeyMarshaller(keyMarshaller);
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#load()
*/
public synchronized void load() {
checkClosed();
if (!loaded) {
if (!loaded) {
loaded = true;
try {
init();
index.load();
long nextItem = root.getNextItem();
while (nextItem != Item.POSITION_NOT_SET) {
IndexItem item = indexManager.getIndex(nextItem);
StoreLocation data = item.getKeyDataItem();
Object key = dataManager.readItem(keyMarshaller, data);
if (index.isTransient()) {
index.store(key, item);
}
indexList.add(item);
nextItem = item.getNextItem();
}
} catch (IOException e) {
LOG.error("Failed to load container " + getId(), e);
throw new RuntimeStoreException(e);
}
}
}
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#unload()
*/
public synchronized void unload() {
checkClosed();
if (loaded) {
loaded = false;
try {
index.unload();
} catch (IOException e) {
LOG.warn("Failed to unload the index", e);
}
indexList.clear();
}
}
public synchronized void delete() {
unload();
try {
index.delete();
} catch (IOException e) {
LOG.warn("Failed to unload the index", e);
}
}
public synchronized void setKeyMarshaller(Marshaller keyMarshaller) {
checkClosed();
this.keyMarshaller = keyMarshaller;
if (index != null) {
index.setKeyMarshaller(keyMarshaller);
}
}
public synchronized void setValueMarshaller(Marshaller valueMarshaller) {
checkClosed();
this.valueMarshaller = valueMarshaller;
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#size()
*/
public synchronized int size() {
load();
return indexList.size();
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#isEmpty()
*/
public synchronized boolean isEmpty() {
load();
return indexList.isEmpty();
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#containsKey(java.lang.Object)
*/
public synchronized boolean containsKey(Object key) {
load();
try {
return index.containsKey(key);
} catch (IOException e) {
LOG.error("Failed trying to find key: " + key, e);
throw new RuntimeException(e);
}
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#get(java.lang.Object)
*/
public synchronized Object get(Object key) {
load();
Object result = null;
StoreEntry item = null;
try {
item = index.get(key);
} catch (IOException e) {
LOG.error("Failed trying to get key: " + key, e);
throw new RuntimeException(e);
}
if (item != null) {
result = getValue(item);
}
return result;
}
/**
* Get the StoreEntry associated with the key
*
* @param key
* @return the StoreEntry
*/
public synchronized StoreEntry getEntry(Object key) {
load();
StoreEntry item = null;
try {
item = index.get(key);
} catch (IOException e) {
LOG.error("Failed trying to get key: " + key, e);
throw new RuntimeException(e);
}
return item;
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#containsValue(java.lang.Object)
*/
public synchronized boolean containsValue(Object o) {
load();
boolean result = false;
if (o != null) {
IndexItem item = indexList.getFirst();
while (item != null) {
Object value = getValue(item);
if (value != null && value.equals(o)) {
result = true;
break;
}
item = indexList.getNextEntry(item);
}
}
return result;
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#putAll(java.util.Map)
*/
public synchronized void putAll(Map t) {
load();
if (t != null) {
for (Iterator i = t.entrySet().iterator(); i.hasNext();) {
Map.Entry entry = (Map.Entry)i.next();
put(entry.getKey(), entry.getValue());
}
}
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#keySet()
*/
public synchronized Set keySet() {
load();
return new ContainerKeySet(this);
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#values()
*/
public synchronized Collection values() {
load();
return new ContainerValueCollection(this);
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#entrySet()
*/
public synchronized Set entrySet() {
load();
return new ContainerEntrySet(this);
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#put(java.lang.Object,
* java.lang.Object)
*/
public synchronized Object put(Object key, Object value) {
load();
Object result = remove(key);
IndexItem item = write(key, value);
try {
index.store(key, item);
} catch (IOException e) {
LOG.error("Failed trying to insert key: " + key, e);
throw new RuntimeException(e);
}
indexList.add(item);
return result;
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#remove(java.lang.Object)
*/
public synchronized Object remove(Object key) {
load();
try {
Object result = null;
IndexItem item = (IndexItem)index.remove(key);
if (item != null) {
// refresh the index
item = (IndexItem)indexList.refreshEntry(item);
result = getValue(item);
IndexItem prev = indexList.getPrevEntry(item);
IndexItem next = indexList.getNextEntry(item);
indexList.remove(item);
delete(item, prev, next);
}
return result;
} catch (IOException e) {
LOG.error("Failed trying to remove key: " + key, e);
throw new RuntimeException(e);
}
}
public synchronized boolean removeValue(Object o) {
load();
boolean result = false;
if (o != null) {
IndexItem item = indexList.getFirst();
while (item != null) {
Object value = getValue(item);
if (value != null && value.equals(o)) {
result = true;
// find the key
Object key = getKey(item);
if (key != null) {
remove(key);
}
break;
}
item = indexList.getNextEntry(item);
}
}
return result;
}
protected synchronized void remove(IndexItem item) {
Object key = getKey(item);
if (key != null) {
remove(key);
}
}
/*
* (non-Javadoc)
*
* @see org.apache.activemq.kaha.MapContainer#clear()
*/
public synchronized void clear() {
checkClosed();
loaded = true;
init();
if (index != null) {
try {
index.clear();
} catch (IOException e) {
LOG.error("Failed trying clear index", e);
throw new RuntimeException(e);
}
}
super.clear();
doClear();
}
/**
* Add an entry to the Store Map
*
* @param key
* @param value
* @return the StoreEntry associated with the entry
*/
public synchronized StoreEntry place(Object key, Object value) {
load();
try {
remove(key);
IndexItem item = write(key, value);
index.store(key, item);
indexList.add(item);
return item;
} catch (IOException e) {
LOG.error("Failed trying to place key: " + key, e);
throw new RuntimeException(e);
}
}
/**
* Remove an Entry from ther Map
*
* @param entry
* @throws IOException
*/
public synchronized void remove(StoreEntry entry) {
load();
IndexItem item = (IndexItem)entry;
if (item != null) {
Object key = getKey(item);
try {
index.remove(key);
} catch (IOException e) {
LOG.error("Failed trying to remove entry: " + entry, e);
throw new RuntimeException(e);
}
IndexItem prev = indexList.getPrevEntry(item);
IndexItem next = indexList.getNextEntry(item);
indexList.remove(item);
delete(item, prev, next);
}
}
    /** @return the first entry in the list, or null when empty */
    public synchronized StoreEntry getFirst() {
        load();
        return indexList.getFirst();
    }
    /** @return the last entry in the list, or null when empty */
    public synchronized StoreEntry getLast() {
        load();
        return indexList.getLast();
    }
    /** @return the entry following the given one, or null at the end */
    public synchronized StoreEntry getNext(StoreEntry entry) {
        load();
        IndexItem item = (IndexItem)entry;
        return indexList.getNextEntry(item);
    }
    /** @return the entry preceding the given one, or null at the start */
    public synchronized StoreEntry getPrevious(StoreEntry entry) {
        load();
        IndexItem item = (IndexItem)entry;
        return indexList.getPrevEntry(item);
    }
    /** @return a refreshed view of the given entry re-read from the list */
    public synchronized StoreEntry refresh(StoreEntry entry) {
        load();
        return indexList.getEntry(entry);
    }
    /**
     * Get the value from its location on disk.
     *
     * @param item the store entry whose value should be read (may be null)
     * @return the value associated with the store entry, or null when the
     *         entry is null
     */
    public synchronized Object getValue(StoreEntry item) {
        load();
        Object result = null;
        if (item != null) {
            try {
                // ensure this value is up to date
                // item=indexList.getEntry(item);
                StoreLocation data = item.getValueDataItem();
                result = dataManager.readItem(valueMarshaller, data);
            } catch (IOException e) {
                LOG.error("Failed to get value for " + item, e);
                throw new RuntimeStoreException(e);
            }
        }
        return result;
    }
    /**
     * Get the Key object from its location on disk.
     *
     * @param item the store entry whose key should be read (may be null)
     * @return the Key Object associated with the StoreEntry, or null when the
     *         entry is null
     */
    public synchronized Object getKey(StoreEntry item) {
        load();
        Object result = null;
        if (item != null) {
            try {
                StoreLocation data = item.getKeyDataItem();
                result = dataManager.readItem(keyMarshaller, data);
            } catch (IOException e) {
                LOG.error("Failed to get key for " + item, e);
                throw new RuntimeStoreException(e);
            }
        }
        return result;
    }
    /** @return the underlying linked list of index items */
    protected IndexLinkedList getItemList() {
        return indexList;
    }
    /**
     * Persist a key/value pair and splice the new index item onto the end of
     * the on-disk linked list, updating the neighbouring links.
     *
     * @param key the key to store
     * @param value the value to store; when null only the key data is written
     * @return the newly created index item
     */
    protected synchronized IndexItem write(Object key, Object value) {
        IndexItem index = null;
        try {
            index = indexManager.createNewIndex();
            StoreLocation data = dataManager.storeDataItem(keyMarshaller, key);
            index.setKeyData(data);
            if (value != null) {
                data = dataManager.storeDataItem(valueMarshaller, value);
                index.setValueData(data);
            }
            // Link after the current tail (or the root for an empty list).
            IndexItem prev = indexList.getLast();
            prev = prev != null ? prev : indexList.getRoot();
            IndexItem next = indexList.getNextEntry(prev);
            prev.setNextItem(index.getOffset());
            index.setPreviousItem(prev.getOffset());
            updateIndexes(prev);
            if (next != null) {
                next.setPreviousItem(index.getOffset());
                index.setNextItem(next.getOffset());
                updateIndexes(next);
            }
            storeIndex(index);
        } catch (IOException e) {
            LOG.error("Failed to write " + key + " , " + value, e);
            throw new RuntimeStoreException(e);
        }
        return index;
    }
    /** @return the number of bins used by the key index */
    public int getIndexBinSize() {
        return indexBinSize;
    }
    public void setIndexBinSize(int indexBinSize) {
        this.indexBinSize = indexBinSize;
    }
    /** @return the marshalled key size (bytes) used by the index */
    public int getIndexKeySize() {
        return indexKeySize;
    }
    public void setIndexKeySize(int indexKeySize) {
        this.indexKeySize = indexKeySize;
    }
    /** @return the index page size (bytes) */
    public int getIndexPageSize() {
        return indexPageSize;
    }
    public void setIndexPageSize(int indexPageSize) {
        this.indexPageSize = indexPageSize;
    }
    /** @return the load factor configured for the key index */
    public int getIndexLoadFactor() {
        return indexLoadFactor;
    }
    public void setIndexLoadFactor(int loadFactor) {
        this.indexLoadFactor = loadFactor;
    }
    // NOTE(review): assumes the configured index implements IndexMBean —
    // a ClassCastException is possible otherwise; confirm with callers.
    public IndexMBean getIndexMBean() {
        return (IndexMBean) index;
    }
    /** @return the maximum number of entries per index bin */
    public int getIndexMaxBinSize() {
        return indexMaxBinSize;
    }
    public void setIndexMaxBinSize(int maxBinSize) {
        this.indexMaxBinSize = maxBinSize;
    }
public String toString() {
load();
StringBuffer buf = new StringBuffer();
buf.append("{");
Iterator i = entrySet().iterator();
boolean hasNext = i.hasNext();
while (hasNext) {
Map.Entry e = (Entry) i.next();
Object key = e.getKey();
Object value = e.getValue();
buf.append(key);
buf.append("=");
buf.append(value);
hasNext = i.hasNext();
if (hasNext)
buf.append(", ");
}
buf.append("}");
return buf.toString();
}
}

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
Map and List container implementations for Kaha
</body>
</html>

View File

@ -1,124 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
/**
 * Represents a single on-disk data file managed by a DataManager. Tracks the
 * file's logical length, a reference count of interested parties and a dirty
 * flag used by the writer to decide when a sync is required.
 */
class DataFile {
    private final File file;
    private final Integer number;
    private int referenceCount;
    private RandomAccessFile randomAccessFile;
    private Object writerData;
    private long length;
    private boolean dirty;

    DataFile(File file, int number) {
        this.file = file;
        this.number = Integer.valueOf(number);
        // An existing file keeps its current size; a new one starts empty.
        this.length = file.exists() ? file.length() : 0;
    }

    Integer getNumber() {
        return number;
    }

    /** Lazily opens (and caches) the underlying file in read/write mode. */
    synchronized RandomAccessFile getRandomAccessFile() throws FileNotFoundException {
        if (randomAccessFile == null) {
            randomAccessFile = new RandomAccessFile(file, "rw");
        }
        return randomAccessFile;
    }

    synchronized long getLength() {
        return length;
    }

    synchronized void incrementLength(int size) {
        length += size;
    }

    /** Closes and forgets the cached handle; a later access reopens it. */
    synchronized void purge() throws IOException {
        if (randomAccessFile != null) {
            randomAccessFile.close();
            randomAccessFile = null;
        }
    }

    synchronized boolean delete() throws IOException {
        purge();
        return file.delete();
    }

    /** Closes the handle but, unlike purge, keeps the cached reference. */
    synchronized void close() throws IOException {
        if (randomAccessFile != null) {
            randomAccessFile.close();
        }
    }

    synchronized int increment() {
        return ++referenceCount;
    }

    synchronized int decrement() {
        return --referenceCount;
    }

    synchronized boolean isUnused() {
        return referenceCount <= 0;
    }

    public String toString() {
        return file.getName() + " number = " + number + " , length = " + length + " refCount = " + referenceCount;
    }

    /**
     * @return Opaque data that a DataFileWriter may want to associate with the
     *         DataFile.
     */
    public synchronized Object getWriterData() {
        return writerData;
    }

    /**
     * @param writerData - Opaque data that a DataFileWriter may want to
     *                associate with the DataFile; also marks the file dirty.
     */
    public synchronized void setWriterData(Object writerData) {
        this.writerData = writerData;
        dirty = true;
    }

    public synchronized boolean isDirty() {
        return dirty;
    }

    public synchronized void setDirty(boolean value) {
        this.dirty = value;
    }
}

View File

@ -1,101 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import org.apache.activemq.kaha.StoreLocation;
/**
 * Location descriptor for a payload persisted in the store: the data file
 * number, the byte offset within that file and the payload size.
 */
public final class DataItem implements Item, StoreLocation {
    private int file = (int)POSITION_NOT_SET;
    private long offset = POSITION_NOT_SET;
    private int size;

    public DataItem() {
    }

    /** Copy constructor used by {@link #copy()}. */
    DataItem(DataItem source) {
        this.file = source.file;
        this.offset = source.offset;
        this.size = source.size;
    }

    /** @return true once a data file number has been assigned */
    boolean isValid() {
        return file != POSITION_NOT_SET;
    }

    /**
     * @return the payload size in bytes
     * @see org.apache.activemq.kaha.StoreLocation#getSize()
     */
    public int getSize() {
        return size;
    }

    /**
     * @param size The size to set.
     */
    public void setSize(int size) {
        this.size = size;
    }

    /**
     * @return the byte offset of the payload within its data file
     * @see org.apache.activemq.kaha.StoreLocation#getOffset()
     */
    public long getOffset() {
        return offset;
    }

    /**
     * @param offset The offset to set.
     */
    public void setOffset(long offset) {
        this.offset = offset;
    }

    /**
     * @return the number of the data file holding the payload
     * @see org.apache.activemq.kaha.StoreLocation#getFile()
     */
    public int getFile() {
        return file;
    }

    /**
     * @param file The file to set.
     */
    public void setFile(int file) {
        this.file = file;
    }

    /**
     * @return a pretty print of this location
     */
    public String toString() {
        return "offset = " + offset + ", file = " + file + ", size = " + size;
    }

    /** @return an independent copy of this location */
    public DataItem copy() {
        return new DataItem(this);
    }
}

View File

@ -1,408 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.kaha.impl.index.RedoStoreIndexItem;
import org.apache.activemq.util.IOExceptionSupport;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manages the set of rolling data files that back a Kaha store: allocates
 * space for new items, reads and writes payloads via marshallers, and
 * reference-counts files so unused ones can be deleted.
 */
public final class DataManagerImpl implements DataManager {
    public static final int ITEM_HEAD_SIZE = 5; // type + length
    public static final byte DATA_ITEM_TYPE = 1;
    public static final byte REDO_ITEM_TYPE = 2;
    public static final long MAX_FILE_LENGTH = 1024 * 1024 * 32;
    private static final Logger LOG = LoggerFactory.getLogger(DataManagerImpl.class);
    private static final String NAME_PREFIX = "data-";
    private final File directory;
    private final String name;
    private SyncDataFileReader reader;
    private SyncDataFileWriter writer;
    // The file currently being appended to; items never span files.
    private DataFile currentWriteFile;
    private long maxFileLength = MAX_FILE_LENGTH;
    private Map<Integer, DataFile> fileMap = new HashMap<Integer, DataFile>();
    private Marshaller redoMarshaller = RedoStoreIndexItem.MARSHALLER;
    private String dataFilePrefix;
    // Shared counter of total bytes on disk, updated as files grow/shrink.
    private final AtomicLong storeSize;
    public DataManagerImpl(File dir, final String name,AtomicLong storeSize) {
        this.directory = dir;
        this.name = name;
        this.storeSize=storeSize;
        dataFilePrefix = IOHelper.toFileSystemSafeName(NAME_PREFIX + name + "-");
        // build up list of current dataFiles
        File[] files = dir.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String n) {
                return dir.equals(directory) && n.startsWith(dataFilePrefix);
            }
        });
        if (files != null) {
            for (int i = 0; i < files.length; i++) {
                File file = files[i];
                String n = file.getName();
                String numStr = n.substring(dataFilePrefix.length(), n.length());
                int num = Integer.parseInt(numStr);
                DataFile dataFile = new DataFile(file, num);
                storeSize.addAndGet(dataFile.getLength());
                fileMap.put(dataFile.getNumber(), dataFile);
                // The highest-numbered file becomes the append target.
                if (currentWriteFile == null || currentWriteFile.getNumber().intValue() < num) {
                    currentWriteFile = dataFile;
                }
            }
        }
    }
    // Registers a new (or re-created) data file under the given number.
    private DataFile createAndAddDataFile(int num) {
        String fileName = dataFilePrefix + num;
        File file = new File(directory, fileName);
        DataFile result = new DataFile(file, num);
        fileMap.put(result.getNumber(), result);
        return result;
    }
    /**
     * @return the store name this manager was created with
     * @see org.apache.activemq.kaha.impl.data.IDataManager#getName()
     */
    public String getName() {
        return name;
    }
    /**
     * Reserve space for the item, rolling over to a new data file when the
     * current one would exceed maxFileLength. Sets the item's file number and
     * offset and grows the file/store length accounting.
     */
    synchronized DataFile findSpaceForData(DataItem item) throws IOException {
        if (currentWriteFile == null || ((currentWriteFile.getLength() + item.getSize()) > maxFileLength)) {
            int nextNum = currentWriteFile != null ? currentWriteFile.getNumber().intValue() + 1 : 1;
            // An unused full file can be dropped before rolling on.
            if (currentWriteFile != null && currentWriteFile.isUnused()) {
                removeDataFile(currentWriteFile);
            }
            currentWriteFile = createAndAddDataFile(nextNum);
        }
        item.setOffset(currentWriteFile.getLength());
        item.setFile(currentWriteFile.getNumber().intValue());
        int len = item.getSize() + ITEM_HEAD_SIZE;
        currentWriteFile.incrementLength(len);
        storeSize.addAndGet(len);
        return currentWriteFile;
    }
    /**
     * Resolve the DataFile holding the given location.
     *
     * @throws IOException when the file number is unknown to this manager
     */
    DataFile getDataFile(StoreLocation item) throws IOException {
        Integer key = Integer.valueOf(item.getFile());
        DataFile dataFile = fileMap.get(key);
        if (dataFile == null) {
            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
            throw new IOException("Could not locate data file " + NAME_PREFIX + name + "-" + item.getFile());
        }
        return dataFile;
    }
    /**
     * Read and un-marshal the payload stored at the given location.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#readItem(org.apache.activemq.kaha.Marshaller,
     *      org.apache.activemq.kaha.StoreLocation)
     */
    public synchronized Object readItem(Marshaller marshaller, StoreLocation item) throws IOException {
        return getReader().readItem(marshaller, item);
    }
    /**
     * Append a new data item and return its location.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#storeDataItem(org.apache.activemq.kaha.Marshaller,
     *      java.lang.Object)
     */
    public synchronized StoreLocation storeDataItem(Marshaller marshaller, Object payload) throws IOException {
        return getWriter().storeItem(marshaller, payload, DATA_ITEM_TYPE);
    }
    /**
     * Append a redo record (used for index recovery) and return its location.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#storeRedoItem(java.lang.Object)
     */
    public synchronized StoreLocation storeRedoItem(Object payload) throws IOException {
        return getWriter().storeItem(redoMarshaller, payload, REDO_ITEM_TYPE);
    }
    /**
     * Overwrite the item at the given location with a new payload.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#updateItem(org.apache.activemq.kaha.StoreLocation,
     *      org.apache.activemq.kaha.Marshaller, java.lang.Object)
     */
    public synchronized void updateItem(StoreLocation location, Marshaller marshaller, Object payload)
        throws IOException {
        getWriter().updateItem((DataItem)location, marshaller, payload, DATA_ITEM_TYPE);
    }
    /**
     * Scan the current write file from the start, invoking the listener for
     * each redo record found; scanning stops at the first unreadable header
     * or payload (treated as end of valid data).
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#recoverRedoItems(org.apache.activemq.kaha.impl.data.RedoListener)
     */
    public synchronized void recoverRedoItems(RedoListener listener) throws IOException {
        // Nothing to recover if there is no current file.
        if (currentWriteFile == null) {
            return;
        }
        DataItem item = new DataItem();
        item.setFile(currentWriteFile.getNumber().intValue());
        item.setOffset(0);
        while (true) {
            byte type;
            try {
                type = getReader().readDataItemSize(item);
            } catch (IOException ignore) {
                LOG.trace("End of data file reached at (header was invalid): " + item);
                return;
            }
            if (type == REDO_ITEM_TYPE) {
                // Un-marshal the redo item
                Object object;
                try {
                    object = readItem(redoMarshaller, item);
                } catch (IOException e1) {
                    LOG.trace("End of data file reached at (payload was invalid): " + item);
                    return;
                }
                try {
                    listener.onRedoItem(item, object);
                    // in case the listener is holding on to item references,
                    // copy it
                    // so we don't change it behind the listener's back.
                    item = item.copy();
                } catch (Exception e) {
                    throw IOExceptionSupport.create("Recovery handler failed: " + e, e);
                }
            }
            // Move to the next item.
            item.setOffset(item.getOffset() + ITEM_HEAD_SIZE + item.getSize());
        }
    }
    /**
     * Force and close every data file, then forget them all.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#close()
     */
    public synchronized void close() throws IOException {
        getWriter().close();
        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
            DataFile dataFile = i.next();
            getWriter().force(dataFile);
            dataFile.close();
        }
        fileMap.clear();
    }
    /**
     * Sync every dirty data file to disk.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#force()
     */
    public synchronized void force() throws IOException {
        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
            DataFile dataFile = i.next();
            getWriter().force(dataFile);
        }
    }
    /**
     * Delete every data file and reset the store-size accounting.
     *
     * @return true when all files were deleted successfully
     * @see org.apache.activemq.kaha.impl.data.IDataManager#delete()
     */
    public synchronized boolean delete() throws IOException {
        boolean result = true;
        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
            DataFile dataFile = i.next();
            storeSize.addAndGet(-dataFile.getLength());
            result &= dataFile.delete();
        }
        fileMap.clear();
        return result;
    }
    /**
     * Increment the reference count of the given file number, creating the
     * file entry if it is not yet known.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#addInterestInFile(int)
     */
    public synchronized void addInterestInFile(int file) throws IOException {
        if (file >= 0) {
            Integer key = Integer.valueOf(file);
            DataFile dataFile = fileMap.get(key);
            if (dataFile == null) {
                dataFile = createAndAddDataFile(file);
            }
            addInterestInFile(dataFile);
        }
    }
    synchronized void addInterestInFile(DataFile dataFile) {
        if (dataFile != null) {
            dataFile.increment();
        }
    }
    /**
     * Decrement the reference count of the given file number; the file is
     * removed once unreferenced (unless it is the current write file).
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#removeInterestInFile(int)
     */
    public synchronized void removeInterestInFile(int file) throws IOException {
        if (file >= 0) {
            Integer key = Integer.valueOf(file);
            DataFile dataFile = fileMap.get(key);
            removeInterestInFile(dataFile);
        }
    }
    synchronized void removeInterestInFile(DataFile dataFile) throws IOException {
        if (dataFile != null) {
            if (dataFile.decrement() <= 0) {
                // Never delete the file still being appended to.
                if (dataFile != currentWriteFile) {
                    removeDataFile(dataFile);
                }
            }
        }
    }
    /**
     * Delete every unreferenced data file except the current write file.
     *
     * @see org.apache.activemq.kaha.impl.data.IDataManager#consolidateDataFiles()
     */
    public synchronized void consolidateDataFiles() throws IOException {
        List<DataFile> purgeList = new ArrayList<DataFile>();
        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
            DataFile dataFile = i.next();
            if (dataFile.isUnused() && dataFile != currentWriteFile) {
                purgeList.add(dataFile);
            }
        }
        for (int i = 0; i < purgeList.size(); i++) {
            DataFile dataFile = purgeList.get(i);
            removeDataFile(dataFile);
        }
    }
    // Forces, unregisters and deletes the file, adjusting store size.
    private void removeDataFile(DataFile dataFile) throws IOException {
        fileMap.remove(dataFile.getNumber());
        if (writer != null) {
            writer.force(dataFile);
        }
        storeSize.addAndGet(-dataFile.getLength());
        boolean result = dataFile.delete();
        LOG.debug("discarding data file " + dataFile + (result ? "successful " : "failed"));
    }
    /**
     * @return the marshaller used for redo records
     * @see org.apache.activemq.kaha.impl.data.IDataManager#getRedoMarshaller()
     */
    public Marshaller getRedoMarshaller() {
        return redoMarshaller;
    }
    /**
     * @param redoMarshaller the marshaller to use for redo records
     * @see org.apache.activemq.kaha.impl.data.IDataManager#setRedoMarshaller(org.apache.activemq.kaha.Marshaller)
     */
    public void setRedoMarshaller(Marshaller redoMarshaller) {
        this.redoMarshaller = redoMarshaller;
    }
    /**
     * @return the maxFileLength
     */
    public long getMaxFileLength() {
        return maxFileLength;
    }
    /**
     * @param maxFileLength the maxFileLength to set
     */
    public void setMaxFileLength(long maxFileLength) {
        this.maxFileLength = maxFileLength;
    }
    public String toString() {
        return "DataManager:(" + NAME_PREFIX + name + ")";
    }
    // Reader/writer are created lazily and may be replaced for testing.
    public synchronized SyncDataFileReader getReader() {
        if (reader == null) {
            reader = createReader();
        }
        return reader;
    }
    protected synchronized SyncDataFileReader createReader() {
        return new SyncDataFileReader(this);
    }
    public synchronized void setReader(SyncDataFileReader reader) {
        this.reader = reader;
    }
    public synchronized SyncDataFileWriter getWriter() {
        if (writer == null) {
            writer = createWriter();
        }
        return writer;
    }
    private SyncDataFileWriter createWriter() {
        return new SyncDataFileWriter(this);
    }
    public synchronized void setWriter(SyncDataFileWriter writer) {
        this.writer = writer;
    }
}

View File

@ -1,30 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
/**
 * Constants shared by items persisted in the store's data and index files.
 */
public interface Item {
    // Sentinel: offset/file number not yet assigned.
    long POSITION_NOT_SET = -1;
    // Magic marker value — presumably written/verified in file headers;
    // confirm against the index file readers.
    short MAGIC = 31317;
    // State markers — presumably distinguish live vs reclaimed slots; confirm.
    int ACTIVE = 22;
    int FREE = 33;
    int LOCATION_SIZE = 24;
}

View File

@ -1,26 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import org.apache.activemq.kaha.StoreLocation;
/**
 * Callback invoked for each redo record found while scanning the current
 * data file during recovery.
 */
public interface RedoListener {
    /**
     * @param item the location of the redo record in the store
     * @param object the un-marshalled redo payload
     * @throws Exception if the record cannot be applied; the scan is aborted
     *         with an IOException by the caller
     */
    void onRedoItem(StoreLocation item, Object object) throws Exception;
}

View File

@ -1,75 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.util.DataByteArrayInputStream;
/**
 * Synchronous reader for store data files. Reads item headers and payloads
 * directly through the RandomAccessFile handles of the owning manager.
 */
public final class SyncDataFileReader {
    private DataManagerImpl dataManager;
    private DataByteArrayInputStream dataIn;

    /**
     * Construct a reader bound to the given data manager.
     */
    SyncDataFileReader(DataManagerImpl fileManager) {
        this.dataManager = fileManager;
        this.dataIn = new DataByteArrayInputStream();
    }

    /**
     * Read the item header at the item's offset: returns the type byte and
     * stores the following int into the item as its payload size.
     */
    public synchronized byte readDataItemSize(DataItem item) throws IOException {
        RandomAccessFile raf = dataManager.getDataFile(item).getRandomAccessFile();
        raf.seek(item.getOffset()); // position at the header
        byte type = raf.readByte();
        item.setSize(raf.readInt());
        return type;
    }

    /**
     * Read and un-marshal the payload located just past the item header.
     */
    public synchronized Object readItem(Marshaller marshaller, StoreLocation item) throws IOException {
        RandomAccessFile raf = dataManager.getDataFile(item).getRandomAccessFile();
        // A fresh array per read; the shared stream is merely re-pointed at it.
        byte[] payload = new byte[item.getSize()];
        raf.seek(item.getOffset() + DataManagerImpl.ITEM_HEAD_SIZE);
        raf.readFully(payload);
        dataIn.restart(payload);
        return marshaller.readPayload(dataIn);
    }
}

View File

@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.data;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.util.DataByteArrayOutputStream;
/**
 * Optimized Store writer. Synchronously marshalls and writes to the data file.
 * Simple but may introduce a bit of contention when put under load: all
 * writes serialize on the instance lock and share one marshalling buffer.
 */
public final class SyncDataFileWriter {
    private DataByteArrayOutputStream buffer;
    private DataManagerImpl dataManager;
    /**
     * Construct a Store writer bound to the given data manager.
     */
    SyncDataFileWriter(DataManagerImpl fileManager) {
        this.dataManager = fileManager;
        this.buffer = new DataByteArrayOutputStream();
    }
    /**
     * Append a new item: the payload is marshalled first (leaving room for
     * the header), the header is back-filled, space is reserved through the
     * manager and the whole buffer is written at the reserved offset.
     *
     * @return the location the item was written to
     */
    public synchronized DataItem storeItem(Marshaller marshaller, Object payload, byte type)
        throws IOException {
        // Write the packet our internal buffer, skipping the header slot.
        buffer.reset();
        buffer.position(DataManagerImpl.ITEM_HEAD_SIZE);
        marshaller.writePayload(payload, buffer);
        int size = buffer.size();
        int payloadSize = size - DataManagerImpl.ITEM_HEAD_SIZE;
        // Back-fill the header: type byte + payload length.
        buffer.reset();
        buffer.writeByte(type);
        buffer.writeInt(payloadSize);
        // Find the position where this item will land at.
        DataItem item = new DataItem();
        item.setSize(payloadSize);
        DataFile dataFile = dataManager.findSpaceForData(item);
        // Now splat the buffer to the file.
        dataFile.getRandomAccessFile().seek(item.getOffset());
        dataFile.getRandomAccessFile().write(buffer.getData(), 0, size);
        dataFile.setWriterData(Boolean.TRUE); // Use as dirty marker..
        dataManager.addInterestInFile(dataFile);
        return item;
    }
    /**
     * Overwrite an existing item in place with a re-marshalled payload.
     * NOTE(review): presumably the new payload must fit the original slot —
     * confirm with callers.
     */
    public synchronized void updateItem(DataItem item, Marshaller marshaller, Object payload, byte type)
        throws IOException {
        // Write the packet our internal buffer, skipping the header slot.
        buffer.reset();
        buffer.position(DataManagerImpl.ITEM_HEAD_SIZE);
        marshaller.writePayload(payload, buffer);
        int size = buffer.size();
        int payloadSize = size - DataManagerImpl.ITEM_HEAD_SIZE;
        // Back-fill the header: type byte + payload length.
        buffer.reset();
        buffer.writeByte(type);
        buffer.writeInt(payloadSize);
        item.setSize(payloadSize);
        DataFile dataFile = dataManager.getDataFile(item);
        RandomAccessFile file = dataFile.getRandomAccessFile();
        file.seek(item.getOffset());
        file.write(buffer.getData(), 0, size);
        dataFile.setWriterData(Boolean.TRUE); // Use as dirty marker..
    }
    /**
     * Sync the file descriptor if the file was marked dirty since the last
     * force, then clear the dirty state.
     */
    public synchronized void force(DataFile dataFile) throws IOException {
        // If our dirty marker was set.. then we need to sync
        if (dataFile.getWriterData() != null && dataFile.isDirty()) {
            dataFile.getRandomAccessFile().getFD().sync();
            dataFile.setWriterData(null);
            dataFile.setDirty(false);
        }
    }
    // Nothing is buffered across calls, so close is a no-op.
    public void close() throws IOException {
    }
}

View File

@ -1,48 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
/**
* Occurs when bad magic occurs in reading a file
*
*
*/
public class BadMagicException extends IOException {
/**
*
*/
private static final long serialVersionUID = -570930196733067056L;
/**
* Default Constructor
*
*/
public BadMagicException() {
super();
}
/**
* Construct an Exception with a reason
*
* @param s
*/
public BadMagicException(String s) {
super(s);
}
}

View File

@ -1,356 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
import org.apache.activemq.kaha.StoreEntry;
/**
* A linked list used by IndexItems
*
*
*/
public class DiskIndexLinkedList implements IndexLinkedList {
protected IndexManager indexManager;
protected transient IndexItem root;
protected transient IndexItem last;
protected transient int size;
    /**
     * Constructs an empty list rooted at the given header index item.
     *
     * @param im the index manager that owns the on-disk entries
     * @param header the root item; never an element of the list itself
     */
    public DiskIndexLinkedList(IndexManager im, IndexItem header) {
        this.indexManager = im;
        this.root = header;
    }
    public synchronized IndexItem getRoot() {
        return root;
    }
    // NOTE(review): not synchronized, unlike getRoot — confirm intended.
    public void setRoot(IndexItem e) {
        this.root = e;
    }
    /**
     * Returns the first element in this list.
     *
     * @return the first element, or null when the list is empty
     */
    public synchronized IndexItem getFirst() {
        if (size == 0) {
            return null;
        }
        return getNextEntry(root);
    }
    /**
     * Returns the last element in this list.
     *
     * NOTE(review): also clears the cached tail's forward link as a side
     * effect — presumably to make it a proper list end; confirm with callers.
     *
     * @return the last element, or null when the list is empty
     */
    public synchronized IndexItem getLast() {
        if (size == 0) {
            return null;
        }
        if (last != null) {
            last.next = null;
            last.setNextItem(IndexItem.POSITION_NOT_SET);
        }
        return last;
    }
    /**
     * Removes and returns the first element from this list.
     *
     * @return the removed first element, or null when the list is empty
     */
    public synchronized StoreEntry removeFirst() {
        if (size == 0) {
            return null;
        }
        IndexItem result = getNextEntry(root);
        remove(result);
        return result;
    }
    /**
     * Removes and returns the last element from this list.
     *
     * @return the removed last element, or null when the list is empty
     */
    public synchronized Object removeLast() {
        if (size == 0) {
            return null;
        }
        StoreEntry result = last;
        remove(last);
        return result;
    }
/**
* Inserts the given element at the beginning of this list.
*
* @param o the element to be inserted at the beginning of this list.
*/
public synchronized void addFirst(IndexItem item) {
if (size == 0) {
last = item;
}
size++;
}
/**
* Appends the given element to the end of this list. (Identical in function
* to the <tt>add</tt> method; included only for consistency.)
*
* @param o the element to be inserted at the end of this list.
*/
public synchronized void addLast(IndexItem item) {
size++;
last = item;
}
/**
* Returns the number of elements in this list.
*
* @return the number of elements in this list.
*/
public synchronized int size() {
return size;
}
/**
* is the list empty?
*
* @return true if there are no elements in the list
*/
public synchronized boolean isEmpty() {
return size == 0;
}
/**
* Appends the specified element to the end of this list.
*
* @param o element to be appended to this list.
* @return <tt>true</tt> (as per the general contract of
* <tt>Collection.add</tt>).
*/
public synchronized boolean add(IndexItem item) {
addLast(item);
return true;
}
/**
* Removes all of the elements from this list.
*/
public synchronized void clear() {
last = null;
size = 0;
}
// Positional Access Operations
/**
* Returns the element at the specified position in this list.
*
* @param index index of element to return.
* @return the element at the specified position in this list.
* @throws IndexOutOfBoundsException if the specified index is is out of
* range (<tt>index &lt; 0 || index &gt;= size()</tt>).
*/
public synchronized IndexItem get(int index) {
return entry(index);
}
/**
* Inserts the specified element at the specified position in this list.
* Shifts the element currently at that position (if any) and any subsequent
* elements to the right (adds one to their indices).
*
* @param index index at which the specified element is to be inserted.
* @param element element to be inserted.
* @throws IndexOutOfBoundsException if the specified index is out of range (<tt>index &lt; 0 || index &gt; size()</tt>).
*/
public synchronized void add(int index, IndexItem element) {
if (index == size) {
last = element;
}
size++;
}
/**
* Removes the element at the specified position in this list. Shifts any
* subsequent elements to the left (subtracts one from their indices).
* Returns the element that was removed from the list.
*
* @param index the index of the element to removed.
* @return the element previously at the specified position.
* @throws IndexOutOfBoundsException if the specified index is out of range (<tt>index &lt; 0 || index &gt;= size()</tt>).
*/
public synchronized Object remove(int index) {
IndexItem e = entry(index);
remove(e);
return e;
}
/**
* Return the indexed entry.
*/
private IndexItem entry(int index) {
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size);
}
IndexItem e = root;
for (int i = 0; i <= index; i++) {
e = getNextEntry(e);
}
if (e != null && last != null && last.equals(e)) {
last = e;
}
return e;
}
// Search Operations
/**
* Returns the index in this list of the first occurrence of the specified
* element, or -1 if the List does not contain this element. More formally,
* returns the lowest index i such that
* <tt>(o==null ? get(i)==null : o.equals(get(i)))</tt>, or -1 if there
* is no such index.
*
* @param o element to search for.
* @return the index in this list of the first occurrence of the specified
* element, or -1 if the list does not contain this element.
*/
public synchronized int indexOf(StoreEntry o) {
int index = 0;
if (size > 0) {
for (IndexItem e = getNextEntry(root); e != null; e = getNextEntry(e)) {
if (o.equals(e)) {
return index;
}
index++;
}
}
return -1;
}
/**
* Retrieve the next entry after this entry
*
* @param entry
* @return next entry
*/
public synchronized IndexItem getNextEntry(IndexItem current) {
IndexItem result = null;
if (current != null) {
current = (IndexItem) refreshEntry(current);
if (current.getNextItem() >= 0) {
try {
result = indexManager.getIndex(current.getNextItem());
} catch (IOException e) {
throw new RuntimeException("Failed to get next index from "
+ indexManager + " for " + current, e);
}
}
}
// essential last get's updated consistently
if (result != null && last != null && last.equals(result)) {
last=result;
}
return result;
}
/**
* Retrive the prev entry after this entry
*
* @param entry
* @return prev entry
*/
public synchronized IndexItem getPrevEntry(IndexItem current) {
IndexItem result = null;
if (current != null) {
if (current.getPreviousItem() >= 0) {
current = (IndexItem) refreshEntry(current);
try {
result = indexManager.getIndex(current.getPreviousItem());
} catch (IOException e) {
throw new RuntimeException(
"Failed to get current index for " + current, e);
}
}
}
// essential root get's updated consistently
if (result != null && root != null && root.equals(result)) {
return null;
}
return result;
}
public synchronized StoreEntry getEntry(StoreEntry current) {
StoreEntry result = null;
if (current != null && current.getOffset() >= 0) {
try {
result = indexManager.getIndex(current.getOffset());
} catch (IOException e) {
throw new RuntimeException("Failed to index", e);
}
}
// essential root get's updated consistently
if (result != null && root != null && root.equals(result)) {
return root;
}
return result;
}
/**
* Update the indexes of a StoreEntry
*
* @param current
*/
public synchronized StoreEntry refreshEntry(StoreEntry current) {
StoreEntry result = null;
if (current != null && current.getOffset() >= 0) {
try {
result = indexManager.refreshIndex((IndexItem)current);
} catch (IOException e) {
throw new RuntimeException("Failed to index", e);
}
}
// essential root get's updated consistently
if (result != null && root != null && root.equals(result)) {
return root;
}
return result;
}
public synchronized void remove(IndexItem e) {
if (e==null || e == root || e.equals(root)) {
return;
}
if (e == last || e.equals(last)) {
if (size > 1) {
last = (IndexItem)refreshEntry(last);
last = getPrevEntry(last);
} else {
last = null;
}
}
size--;
}
}

View File

@ -1,106 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreEntry;
/**
 * A persistent key-to-{@link StoreEntry} index — simpler than a Map.
 */
public interface Index {
    /**
     * clear the index
     *
     * @throws IOException
     *
     */
    void clear() throws IOException;
    /**
     * @param key
     * @return true if it contains the key
     * @throws IOException
     */
    boolean containsKey(Object key) throws IOException;
    /**
     * remove the index key
     *
     * @param key
     * @return StoreEntry removed
     * @throws IOException
     */
    StoreEntry remove(Object key) throws IOException;
    /**
     * store the key, item
     *
     * @param key
     * @param entry
     * @throws IOException
     */
    void store(Object key, StoreEntry entry) throws IOException;
    /**
     * @param key
     * @return the entry for the key
     * @throws IOException
     */
    StoreEntry get(Object key) throws IOException;
    /**
     * @return true if the index is transient (not persisted across restarts)
     */
    boolean isTransient();
    /**
     * load indexes
     */
    void load();
    /**
     * unload indexes
     *
     * @throws IOException
     */
    void unload() throws IOException;
    /**
     * Set the marshaller for key objects
     *
     * @param marshaller
     */
    void setKeyMarshaller(Marshaller marshaller);
    /**
     * return the size of the index
     * @return the number of entries
     */
    int getSize();
    /**
     * delete all state associated with the index
     *
     * @throws IOException
     */
    void delete() throws IOException;
}

View File

@ -1,332 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.StoreLocation;
import org.apache.activemq.kaha.impl.data.DataItem;
import org.apache.activemq.kaha.impl.data.Item;
/**
 * An Item with a relative position and location to other Items in the Store.
 *
 * Persistent layout (see {@link #write}): magic(2) + active(1) + prev(8) +
 * next(8) = 19 bytes of link header ({@link #INDEXES_ONLY_SIZE}), followed by
 * keyFile(4) + keyOffset(8) + keySize(4) + valueFile(4) + valueOffset(8) +
 * valueSize(4), for 51 bytes total ({@link #INDEX_SIZE}). The read/write
 * field order below defines this format and must not change.
 */
public class IndexItem implements Item, StoreEntry {
    // Size in bytes of a full record on disk.
    public static final int INDEX_SIZE = 51;
    // Size in bytes of just the magic/active/prev/next link header.
    public static final int INDEXES_ONLY_SIZE = 19;
    // Offset of this record within the index file; also its identity (see equals).
    protected long offset = POSITION_NOT_SET;
    // used by linked list
    IndexItem next;
    IndexItem prev;
    // On-disk offsets of the neighbouring records in the linked list.
    private long previousItem = POSITION_NOT_SET;
    private long nextItem = POSITION_NOT_SET;
    // false once the record has been freed for reuse.
    private boolean active = true;
    // TODO: consider just using a DataItem for the following fields.
    private long keyOffset = POSITION_NOT_SET;
    private int keyFile = (int)POSITION_NOT_SET;
    private int keySize;
    private long valueOffset = POSITION_NOT_SET;
    private int valueFile = (int)POSITION_NOT_SET;
    private int valueSize;
    /**
     * Default Constructor
     */
    public IndexItem() {
    }
    /** Reset all fields (except offset) to their unset defaults. */
    void reset() {
        previousItem = POSITION_NOT_SET;
        nextItem = POSITION_NOT_SET;
        keyOffset = POSITION_NOT_SET;
        keyFile = (int)POSITION_NOT_SET;
        keySize = 0;
        valueOffset = POSITION_NOT_SET;
        valueFile = (int)POSITION_NOT_SET;
        valueSize = 0;
        active = true;
    }
    /**
     * @return a new DataItem locating the key data
     * @see org.apache.activemq.kaha.StoreEntry#getKeyDataItem()
     */
    public StoreLocation getKeyDataItem() {
        DataItem result = new DataItem();
        result.setOffset(keyOffset);
        result.setFile(keyFile);
        result.setSize(keySize);
        return result;
    }
    /**
     * @return a new DataItem locating the value data
     * @see org.apache.activemq.kaha.StoreEntry#getValueDataItem()
     */
    public StoreLocation getValueDataItem() {
        DataItem result = new DataItem();
        result.setOffset(valueOffset);
        result.setFile(valueFile);
        result.setSize(valueSize);
        return result;
    }
    public void setValueData(StoreLocation item) {
        valueOffset = item.getOffset();
        valueFile = item.getFile();
        valueSize = item.getSize();
    }
    public void setKeyData(StoreLocation item) {
        keyOffset = item.getOffset();
        keyFile = item.getFile();
        keySize = item.getSize();
    }
    /**
     * Write the full 51-byte record; field order defines the wire format.
     *
     * @param dataOut
     * @throws IOException
     */
    public void write(DataOutput dataOut) throws IOException {
        dataOut.writeShort(MAGIC);
        dataOut.writeBoolean(active);
        dataOut.writeLong(previousItem);
        dataOut.writeLong(nextItem);
        dataOut.writeInt(keyFile);
        dataOut.writeLong(keyOffset);
        dataOut.writeInt(keySize);
        dataOut.writeInt(valueFile);
        dataOut.writeLong(valueOffset);
        dataOut.writeInt(valueSize);
    }
    /** Write only the 19-byte link header (magic/active/prev/next). */
    void updateIndexes(DataOutput dataOut) throws IOException {
        dataOut.writeShort(MAGIC);
        dataOut.writeBoolean(active);
        dataOut.writeLong(previousItem);
        dataOut.writeLong(nextItem);
    }
    /**
     * Read a full record; must mirror {@link #write}.
     *
     * @param dataIn
     * @throws IOException
     */
    public void read(DataInput dataIn) throws IOException {
        if (dataIn.readShort() != MAGIC) {
            throw new BadMagicException();
        }
        active = dataIn.readBoolean();
        previousItem = dataIn.readLong();
        nextItem = dataIn.readLong();
        keyFile = dataIn.readInt();
        keyOffset = dataIn.readLong();
        keySize = dataIn.readInt();
        valueFile = dataIn.readInt();
        valueOffset = dataIn.readLong();
        valueSize = dataIn.readInt();
    }
    /** Read only the link header; must mirror {@link #updateIndexes}. */
    void readIndexes(DataInput dataIn) throws IOException {
        if (dataIn.readShort() != MAGIC) {
            throw new BadMagicException();
        }
        active = dataIn.readBoolean();
        previousItem = dataIn.readLong();
        nextItem = dataIn.readLong();
    }
    /**
     * @param newPrevEntry
     */
    public void setPreviousItem(long newPrevEntry) {
        previousItem = newPrevEntry;
    }
    /**
     * @return prev item
     */
    long getPreviousItem() {
        return previousItem;
    }
    /**
     * @param newNextEntry
     */
    public void setNextItem(long newNextEntry) {
        nextItem = newNextEntry;
    }
    /**
     * @return offset of the next item
     * @see org.apache.activemq.kaha.StoreEntry#getNextItem()
     */
    public long getNextItem() {
        return nextItem;
    }
    /**
     * @param newObjectOffset
     */
    void setKeyOffset(long newObjectOffset) {
        keyOffset = newObjectOffset;
    }
    /**
     * @return key offset
     */
    long getKeyOffset() {
        return keyOffset;
    }
    /**
     * @return the key file id
     * @see org.apache.activemq.kaha.StoreEntry#getKeyFile()
     */
    public int getKeyFile() {
        return keyFile;
    }
    /**
     * @param keyFile The keyFile to set.
     */
    void setKeyFile(int keyFile) {
        this.keyFile = keyFile;
    }
    /**
     * @return the value file id
     * @see org.apache.activemq.kaha.StoreEntry#getValueFile()
     */
    public int getValueFile() {
        return valueFile;
    }
    /**
     * @param valueFile The valueFile to set.
     */
    void setValueFile(int valueFile) {
        this.valueFile = valueFile;
    }
    /**
     * @return the value offset
     * @see org.apache.activemq.kaha.StoreEntry#getValueOffset()
     */
    public long getValueOffset() {
        return valueOffset;
    }
    /**
     * @param valueOffset The valueOffset to set.
     */
    public void setValueOffset(long valueOffset) {
        this.valueOffset = valueOffset;
    }
    /**
     * @return Returns the active.
     */
    boolean isActive() {
        return active;
    }
    /**
     * @param active The active to set.
     */
    void setActive(boolean active) {
        this.active = active;
    }
    /**
     * @return this record's offset in the index file
     * @see org.apache.activemq.kaha.StoreEntry#getOffset()
     */
    public long getOffset() {
        return offset;
    }
    /**
     * @param offset The offset to set.
     */
    public void setOffset(long offset) {
        this.offset = offset;
    }
    /**
     * @return the key size in bytes
     * @see org.apache.activemq.kaha.StoreEntry#getKeySize()
     */
    public int getKeySize() {
        return keySize;
    }
    public void setKeySize(int keySize) {
        this.keySize = keySize;
    }
    /**
     * @return the value size in bytes
     * @see org.apache.activemq.kaha.StoreEntry#getValueSize()
     */
    public int getValueSize() {
        return valueSize;
    }
    public void setValueSize(int valueSize) {
        this.valueSize = valueSize;
    }
    /** Copy only offset and link-header fields from another record. */
    void copyIndex(IndexItem other) {
        this.offset=other.offset;
        this.active=other.active;
        this.previousItem=other.previousItem;
        this.nextItem=other.nextItem;
    }
    /**
     * @return print of 'this'
     */
    public String toString() {
        String result = "offset=" + offset + ", key=(" + keyFile + ", " + keyOffset + ", " + keySize + ")" + ", value=(" + valueFile + ", " + valueOffset + ", " + valueSize + ")"
                        + ", previousItem=" + previousItem + ", nextItem=" + nextItem;
        return result;
    }
    // Identity is the on-disk offset only; all other fields are ignored.
    public boolean equals(Object obj) {
        boolean result = obj == this;
        if (!result && obj != null && obj instanceof IndexItem) {
            IndexItem other = (IndexItem)obj;
            result = other.offset == this.offset;
        }
        return result;
    }
    // Consistent with equals: derived from the offset alone.
    public int hashCode() {
        return (int)offset;
    }
}

View File

@ -1,199 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import org.apache.activemq.kaha.StoreEntry;
/**
 * Interface to a LinkedList of Indexes
 */
public interface IndexLinkedList {
    /**
     * Set the new Root
     * @param newRoot
     */
    void setRoot(IndexItem newRoot);
    /**
     * @return the root used by the List
     */
    IndexItem getRoot();
    /**
     * Returns the first element in this list.
     *
     * @return the first element in this list.
     */
    IndexItem getFirst();
    /**
     * Returns the last element in this list.
     *
     * @return the last element in this list.
     */
    IndexItem getLast();
    /**
     * Removes and returns the first element from this list.
     *
     * @return the first element from this list.
     */
    StoreEntry removeFirst();
    /**
     * Removes and returns the last element from this list.
     *
     * @return the last element from this list.
     */
    Object removeLast();
    /**
     * Inserts the given element at the beginning of this list.
     *
     * @param item
     */
    void addFirst(IndexItem item);
    /**
     * Appends the given element to the end of this list. (Identical in function
     * to the <tt>add</tt> method; included only for consistency.)
     *
     * @param item
     */
    void addLast(IndexItem item);
    /**
     * Returns the number of elements in this list.
     *
     * @return the number of elements in this list.
     */
    int size();
    /**
     * is the list empty?
     *
     * @return true if there are no elements in the list
     */
    boolean isEmpty();
    /**
     * Appends the specified element to the end of this list.
     *
     * @param item
     *
     * @return <tt>true</tt> (as per the general contract of
     *         <tt>Collection.add</tt>).
     */
    boolean add(IndexItem item);
    /**
     * Removes all of the elements from this list.
     */
    void clear();
    // Positional Access Operations
    /**
     * Returns the element at the specified position in this list.
     *
     * @param index index of element to return.
     * @return the element at the specified position in this list.
     *
     * @throws IndexOutOfBoundsException if the specified index is is out of
     *                 range (<tt>index &lt; 0 || index &gt;= size()</tt>).
     */
    IndexItem get(int index);
    /**
     * Inserts the specified element at the specified position in this list.
     * Shifts the element currently at that position (if any) and any subsequent
     * elements to the right (adds one to their indices).
     *
     * @param index index at which the specified element is to be inserted.
     * @param element element to be inserted.
     *
     * @throws IndexOutOfBoundsException if the specified index is out of range (<tt>index &lt; 0 || index &gt; size()</tt>).
     */
    void add(int index, IndexItem element);
    /**
     * Removes the element at the specified position in this list. Shifts any
     * subsequent elements to the left (subtracts one from their indices).
     * Returns the element that was removed from the list.
     *
     * @param index the index of the element to removed.
     * @return the element previously at the specified position.
     *
     * @throws IndexOutOfBoundsException if the specified index is out of range (<tt>index &lt; 0 || index &gt;= size()</tt>).
     */
    Object remove(int index);
    // Search Operations
    /**
     * Returns the index in this list of the first occurrence of the specified
     * element, or -1 if the List does not contain this element. More formally,
     * returns the lowest index i such that
     * <tt>(o==null ? get(i)==null : o.equals(get(i)))</tt>, or -1 if there
     * is no such index.
     *
     * @param o element to search for.
     * @return the index in this list of the first occurrence of the specified
     *         element, or -1 if the list does not contain this element.
     */
    int indexOf(StoreEntry o);
    /**
     * Retrieve the next entry after this entry
     *
     * @param entry
     * @return next entry
     */
    IndexItem getNextEntry(IndexItem entry);
    /**
     * Retrieve the prev entry before this entry
     *
     * @param entry
     * @return prev entry
     */
    IndexItem getPrevEntry(IndexItem entry);
    /**
     * remove an entry
     *
     * @param e
     */
    void remove(IndexItem e);
    /**
     * Ensure we have the up to date entry
     *
     * @param entry
     * @return the entry
     */
    StoreEntry getEntry(StoreEntry entry);
    /**
     * Update the indexes of a StoreEntry
     *
     * @param current
     * @return update StoreEntry
     */
    StoreEntry refreshEntry(StoreEntry current);
}

View File

@ -1,225 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manages the file of fixed-size {@link IndexItem} records that back one
 * index: reading and writing records, and recycling freed slots through an
 * on-disk free list (a chain of inactive records linked by their nextItem
 * offsets).
 */
public final class IndexManager {
    public static final String NAME_PREFIX = "index-";
    private static final Logger LOG = LoggerFactory.getLogger(IndexManager.class);
    private final String name;
    private File directory;
    private File file;
    private RandomAccessFile indexFile;
    private StoreIndexReader reader;
    private StoreIndexWriter writer;
    // Optional redo log the writer records each index write into.
    private DataManager redoLog;
    // RandomAccessFile open mode, e.g. "rw".
    private String mode;
    // Logical length of the index file in bytes.
    private long length;
    // Head and tail of the free-list chain of inactive records.
    private IndexItem firstFree;
    private IndexItem lastFree;
    // True when there are unsynced writes; cleared by force().
    private boolean dirty;
    // Shared counter of total store size, incremented as this file grows.
    private final AtomicLong storeSize;
    // Number of records currently on the free list.
    private int freeSize = 0;
    public IndexManager(File directory, String name, String mode, DataManager redoLog, AtomicLong storeSize) throws IOException {
        this.directory = directory;
        this.name = name;
        this.mode = mode;
        this.redoLog = redoLog;
        this.storeSize=storeSize;
        initialize();
    }
    public synchronized boolean isEmpty() {
        return lastFree == null && length == 0;
    }
    /** Read the record stored at the given file offset (null for a negative offset). */
    public synchronized IndexItem getIndex(long offset) throws IOException {
        IndexItem result = null;
        if (offset >= 0) {
            result = reader.readItem(offset);
        }
        return result;
    }
    /** Re-read only the link header of the item from disk, in place. */
    public synchronized IndexItem refreshIndex(IndexItem item) throws IOException {
        reader.updateIndexes(item);
        return item;
    }
    /**
     * Marks the record inactive and appends it to the free-list chain so the
     * slot can be reused by {@link #createNewIndex()}.
     */
    public synchronized void freeIndex(IndexItem item) throws IOException {
        item.reset();
        item.setActive(false);
        if (lastFree == null) {
            firstFree = item;
            lastFree = item;
        } else {
            lastFree.setNextItem(item.getOffset());
            if (lastFree.equals(firstFree)) {
                // equals() compares offsets only: firstFree and lastFree alias the
                // same record here, so take a private copy of the head before
                // lastFree is repointed below.
                firstFree=new IndexItem();
                firstFree.copyIndex(lastFree);
                writer.updateIndexes(firstFree);
            }
            writer.updateIndexes(lastFree);
            lastFree=item;
        }
        writer.updateIndexes(item);
        freeSize++;
        dirty = true;
    }
    /** Write the full record to disk. */
    public synchronized void storeIndex(IndexItem index) throws IOException {
        writer.storeItem(index);
        dirty = true;
    }
    /** Write only the record's link header; errors are logged, not rethrown. */
    public synchronized void updateIndexes(IndexItem index) throws IOException {
        try {
            writer.updateIndexes(index);
        } catch (Throwable e) {
            LOG.error(name + " error updating indexes ", e);
        }
        dirty = true;
    }
    /** Re-apply a redo-log record during recovery. */
    public synchronized void redo(final RedoStoreIndexItem redo) throws IOException {
        writer.redoStoreItem(redo);
        dirty = true;
    }
    /**
     * Obtain a record slot: reuse one from the free list if possible,
     * otherwise extend the file by INDEX_SIZE bytes.
     */
    public synchronized IndexItem createNewIndex() throws IOException {
        IndexItem result = getNextFreeIndex();
        if (result == null) {
            // allocate one
            result = new IndexItem();
            result.setOffset(length);
            length += IndexItem.INDEX_SIZE;
            storeSize.addAndGet(IndexItem.INDEX_SIZE);
        }
        return result;
    }
    public synchronized void close() throws IOException {
        if (indexFile != null) {
            indexFile.close();
            indexFile = null;
        }
    }
    /** Sync pending writes to disk, if any. */
    public synchronized void force() throws IOException {
        if (indexFile != null && dirty) {
            indexFile.getFD().sync();
            dirty = false;
        }
    }
    /** Close and delete the backing index file. */
    public synchronized boolean delete() throws IOException {
        firstFree = null;
        lastFree = null;
        if (indexFile != null) {
            indexFile.close();
            indexFile = null;
        }
        return file.delete();
    }
    /** Pop the head of the free-list chain, or null when it is empty. */
    private synchronized IndexItem getNextFreeIndex() throws IOException {
        IndexItem result = null;
        if (firstFree != null) {
            if (firstFree.equals(lastFree)) {
                // Single entry on the free list.
                result = firstFree;
                firstFree = null;
                lastFree = null;
            } else {
                result = firstFree;
                firstFree = getIndex(firstFree.getNextItem());
                if (firstFree == null) {
                    lastFree = null;
                }
            }
            result.reset();
            writer.updateIndexes(result);
            freeSize--;
        }
        return result;
    }
    synchronized long getLength() {
        return length;
    }
    public final long size() {
        return length;
    }
    // NOTE(review): adds the full new length to storeSize rather than the
    // delta from the previous length — confirm this is intended at call sites.
    public synchronized void setLength(long value) {
        this.length = value;
        storeSize.addAndGet(length);
    }
    /** Try to lock the whole index file; returns null if already locked elsewhere. */
    public synchronized FileLock getLock() throws IOException {
        return indexFile.getChannel().tryLock(0, Math.max(1, indexFile.getChannel().size()), false);
    }
    public String toString() {
        return "IndexManager:(" + NAME_PREFIX + name + ")";
    }
    /**
     * Open the index file and scan every record, chaining each inactive one
     * into the in-memory free list for reuse.
     */
    protected void initialize() throws IOException {
        file = new File(directory, NAME_PREFIX + IOHelper.toFileSystemSafeName(name) );
        IOHelper.mkdirs(file.getParentFile());
        indexFile = new RandomAccessFile(file, mode);
        reader = new StoreIndexReader(indexFile);
        writer = new StoreIndexWriter(indexFile, name, redoLog);
        long offset = 0;
        while ((offset + IndexItem.INDEX_SIZE) <= indexFile.length()) {
            IndexItem index = reader.readItem(offset);
            if (!index.isActive()) {
                index.reset();
                if (lastFree != null) {
                    lastFree.setNextItem(index.getOffset());
                    updateIndexes(lastFree);
                    lastFree = index;
                } else {
                    lastFree = index;
                    firstFree = index;
                }
                freeSize++;
            }
            offset += IndexItem.INDEX_SIZE;
        }
        length = offset;
        storeSize.addAndGet(length);
    }
}

View File

@ -1,102 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.apache.activemq.kaha.Marshaller;
public class RedoStoreIndexItem implements Externalizable {

    private static final long serialVersionUID = -4865508871719676655L;

    /** Marshaller used to (de)serialize redo records for the redo log. */
    public static final Marshaller MARSHALLER = new Marshaller() {
        public Object readPayload(DataInput in) throws IOException {
            RedoStoreIndexItem redo = new RedoStoreIndexItem();
            redo.readExternal(in);
            return redo;
        }

        public void writePayload(Object object, DataOutput out) throws IOException {
            ((RedoStoreIndexItem) object).writeExternal(out);
        }
    };

    private String indexName;    // name of the owning index (not serialized)
    private IndexItem indexItem; // the index record being re-applied
    private long offset;         // file offset the record is written at

    /** Default constructor, required by Externalizable. */
    public RedoStoreIndexItem() {
    }

    public RedoStoreIndexItem(String indexName, long offset, IndexItem item) {
        this.indexName = indexName;
        this.offset = offset;
        this.indexItem = item;
    }

    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        readExternal((DataInput) in);
    }

    /** Reads offset then index item; the index name is intentionally not serialized. */
    public void readExternal(DataInput in) throws IOException {
        // indexName = in.readUTF();
        this.offset = in.readLong();
        IndexItem item = new IndexItem();
        item.read(in);
        this.indexItem = item;
    }

    public void writeExternal(ObjectOutput out) throws IOException {
        writeExternal((DataOutput) out);
    }

    /** Writes offset then index item; the index name is intentionally not serialized. */
    public void writeExternal(DataOutput out) throws IOException {
        // out.writeUTF(indexName);
        out.writeLong(offset);
        indexItem.write(out);
    }

    public String getIndexName() {
        return indexName;
    }

    public void setIndexName(String indexName) {
        this.indexName = indexName;
    }

    public IndexItem getIndexItem() {
        return indexItem;
    }

    public void setIndexItem(IndexItem item) {
        this.indexItem = item;
    }

    public long getOffset() {
        return offset;
    }

    public void setOffset(long offset) {
        this.offset = offset;
    }
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.activemq.util.DataByteArrayInputStream;
/**
 * Reads {@link IndexItem} records from the index file through a reusable
 * buffer. Not thread safe; callers synchronize externally.
 */
class StoreIndexReader {
    protected RandomAccessFile file;
    protected DataByteArrayInputStream dataIn;
    // Reused scratch buffer, sized for one full record.
    protected byte[] buffer = new byte[IndexItem.INDEX_SIZE];

    /**
     * Construct a Store reader
     *
     * @param file the index file to read from
     */
    StoreIndexReader(RandomAccessFile file) {
        this.file = file;
        this.dataIn = new DataByteArrayInputStream();
    }

    /** Read the full record at the given offset into a fresh IndexItem. */
    protected IndexItem readItem(long offset) throws IOException {
        file.seek(offset);
        file.readFully(buffer);
        dataIn.restart(buffer);
        IndexItem item = new IndexItem();
        item.setOffset(offset);
        item.read(dataIn);
        return item;
    }

    /** Re-read just the link header of the given item from disk, in place. */
    void updateIndexes(IndexItem indexItem) throws IOException {
        if (indexItem == null) {
            return;
        }
        file.seek(indexItem.getOffset());
        file.readFully(buffer, 0, IndexItem.INDEXES_ONLY_SIZE);
        dataIn.restart(buffer);
        indexItem.readIndexes(dataIn);
    }
}

View File

@ -1,84 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.activemq.kaha.impl.DataManager;
import org.apache.activemq.util.DataByteArrayOutputStream;
/**
 * Writes {@link IndexItem} records to the index file, optionally recording
 * each write in a redo log first. Not thread safe; callers synchronize
 * externally.
 */
class StoreIndexWriter {
    protected final DataByteArrayOutputStream dataOut = new DataByteArrayOutputStream();
    protected final RandomAccessFile file;
    protected final String name;
    protected final DataManager redoLog;

    /**
     * Construct a Store index writer with no redo log.
     *
     * @param file the index file to write to
     */
    StoreIndexWriter(RandomAccessFile file) {
        this(file, null, null);
    }

    public StoreIndexWriter(RandomAccessFile file, String indexName, DataManager redoLog) {
        this.file = file;
        this.name = indexName;
        this.redoLog = redoLog;
    }

    /** Write the full record at its own offset, redo-logging it first. */
    void storeItem(IndexItem indexItem) throws IOException {
        logRedo(indexItem);
        dataOut.reset();
        indexItem.write(dataOut);
        file.seek(indexItem.getOffset());
        file.write(dataOut.getData(), 0, IndexItem.INDEX_SIZE);
    }

    /** Write only the record's link header, redo-logging it first. */
    void updateIndexes(IndexItem indexItem) throws IOException {
        logRedo(indexItem);
        dataOut.reset();
        indexItem.updateIndexes(dataOut);
        file.seek(indexItem.getOffset());
        file.write(dataOut.getData(), 0, IndexItem.INDEXES_ONLY_SIZE);
    }

    /** Re-apply a redo record: write its full item at the recorded offset. */
    public void redoStoreItem(RedoStoreIndexItem redo) throws IOException {
        dataOut.reset();
        redo.getIndexItem().write(dataOut);
        file.seek(redo.getOffset());
        file.write(dataOut.getData(), 0, IndexItem.INDEX_SIZE);
    }

    /** Record the pending write in the redo log, when one is configured. */
    private void logRedo(IndexItem indexItem) throws IOException {
        if (redoLog != null) {
            redoLog.storeRedoItem(new RedoStoreIndexItem(name, indexItem.getOffset(), indexItem));
        }
    }
}

View File

@ -1,135 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.activemq.kaha.IndexMBean;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Index implementation using a HashMap
*
*
*/
/**
 * Transient (in-memory) index implementation backed by a HashMap.
 *
 * Entries returned from {@link #get} and {@link #remove} are refreshed
 * through the IndexManager so callers always observe the current state
 * of the underlying IndexItem. Not thread-safe.
 */
public class VMIndex implements Index, IndexMBean {
    private static final Logger LOG = LoggerFactory.getLogger(VMIndex.class);
    private IndexManager indexManager;
    private Map<Object, StoreEntry> map = new HashMap<Object, StoreEntry>();
    public VMIndex(IndexManager manager) {
        this.indexManager = manager;
    }
    /**
     *
     * @see org.apache.activemq.kaha.impl.index.Index#clear()
     */
    public void clear() {
        map.clear();
    }
    /**
     * @param key
     * @return true if the index contains the key
     * @see org.apache.activemq.kaha.impl.index.Index#containsKey(java.lang.Object)
     */
    public boolean containsKey(Object key) {
        return map.containsKey(key);
    }
    /**
     * @param key
     * @return the removed entry (refreshed via the index manager), or null when absent
     * @see org.apache.activemq.kaha.impl.index.Index#removeKey(java.lang.Object)
     */
    public StoreEntry remove(Object key) {
        return refresh(map.remove(key));
    }
    /**
     * @param key
     * @param entry
     * @see org.apache.activemq.kaha.impl.index.Index#store(java.lang.Object,
     *      org.apache.activemq.kaha.impl.index.IndexItem)
     */
    public void store(Object key, StoreEntry entry) {
        map.put(key, entry);
    }
    /**
     * @param key
     * @return the entry (refreshed via the index manager), or null when absent
     */
    public StoreEntry get(Object key) {
        return refresh(map.get(key));
    }
    /**
     * @return true if the index is transient
     */
    public boolean isTransient() {
        return true;
    }
    /**
     * load indexes - nothing to do for the in-memory implementation
     */
    public void load() {
    }
    /**
     * unload indexes
     */
    public void unload() {
        map.clear();
    }
    public void delete() throws IOException {
        unload();
    }
    // keys are only used via equals/hashCode here, no marshalling needed
    public void setKeyMarshaller(Marshaller marshaller) {
    }
    public int getSize() {
        return map.size();
    }
    /**
     * Refreshes an entry through the index manager; null-safe.
     * Keeps the original RuntimeException type on failure, but now
     * chains the IOException cause instead of dropping it (previously
     * both get() and remove() duplicated this logic and lost the cause).
     */
    private StoreEntry refresh(StoreEntry entry) {
        if (entry == null) {
            return null;
        }
        try {
            return indexManager.refreshIndex((IndexItem) entry);
        } catch (IOException e) {
            LOG.error("Failed to refresh entry", e);
            throw new RuntimeException("Failed to refresh entry", e);
        }
    }
}

View File

@ -1,293 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index;
import org.apache.activemq.kaha.StoreEntry;
/**
* A linked list used by IndexItems
*
*
*/
/**
 * Doubly-linked list of IndexItems threaded through the items' own
 * next/prev fields, using a sentinel root node (root.next and root.prev
 * point back at root when the list is empty). All operations are
 * synchronized on this instance.
 */
public final class VMIndexLinkedList implements Cloneable, IndexLinkedList {
// sentinel header node; never carries payload itself
private transient IndexItem root;
// number of payload entries, sentinel excluded
private transient int size;
/**
 * Constructs an empty list.
 * @param header sentinel node used as the list head
 */
public VMIndexLinkedList(IndexItem header) {
this.root = header;
this.root.next=this.root.prev=this.root;
}
/** Replaces the sentinel reference; does not relink existing entries. */
public void setRoot(IndexItem newRoot) {
this.root=newRoot;
}
public synchronized IndexItem getRoot() {
return root;
}
/** @return the first payload entry, or null when the list is empty */
public synchronized IndexItem getFirst() {
if (size == 0) {
return null;
}
return root.next;
}
/** @return the last payload entry, or null when the list is empty */
public synchronized IndexItem getLast() {
if (size == 0) {
return null;
}
return root.prev;
}
/** Removes and returns the first entry, or null when empty. */
public synchronized StoreEntry removeFirst() {
if (size == 0) {
return null;
}
StoreEntry result = root.next;
remove(root.next);
return result;
}
/** Removes and returns the last entry, or null when empty. */
public synchronized Object removeLast() {
if (size == 0) {
return null;
}
StoreEntry result = root.prev;
remove(root.prev);
return result;
}
/** Inserts the item at the head of the list. */
public synchronized void addFirst(IndexItem item) {
addBefore(item, root.next);
}
/** Appends the item at the tail of the list. */
public synchronized void addLast(IndexItem item) {
addBefore(item, root);
}
public synchronized int size() {
return size;
}
public synchronized boolean isEmpty() {
return size == 0;
}
/** Appends the item; always returns true (List-style contract). */
public synchronized boolean add(IndexItem item) {
addBefore(item, root);
return true;
}
/** Empties the list by pointing the sentinel back at itself. */
public synchronized void clear() {
root.next=root.prev=root;
size = 0;
}
// Positional Access Operations
/** @return the entry at the given 0-based position */
public synchronized IndexItem get(int index) {
return entry(index);
}
/** Inserts the element before the entry currently at index. */
public synchronized void add(int index, IndexItem element) {
addBefore(element, index == size ? root : entry(index));
}
/** Removes and returns the entry at the given position. */
public synchronized Object remove(int index) {
IndexItem e = entry(index);
remove(e);
return e;
}
/**
 * Return the indexed entry.
 * Walks from whichever end of the list is closer to index.
 */
private IndexItem entry(int index) {
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size);
}
IndexItem e = root;
if (index < size / 2) {
for (int i = 0; i <= index; i++) {
e = e.next;
}
} else {
for (int i = size; i > index; i--) {
e = e.prev;
}
}
return e;
}
// Search Operations
/** @return the position of the entry (identity comparison), or -1 */
public synchronized int indexOf(StoreEntry o) {
int index = 0;
for (IndexItem e = root.next; e != root; e = e.next) {
if (o == e) {
return index;
}
index++;
}
return -1;
}
/** @return the successor of entry, or null when entry is the last one */
public synchronized IndexItem getNextEntry(IndexItem entry) {
return entry.next != root ? entry.next : null;
}
/** @return the predecessor of entry, or null when entry is the first one */
public synchronized IndexItem getPrevEntry(IndexItem entry) {
return entry.prev != root ? entry.prev : null;
}
/** Links insert immediately before e and increments the size. */
public synchronized void addBefore(IndexItem insert, IndexItem e) {
insert.next = e;
insert.prev = e.prev;
insert.prev.next = insert;
insert.next.prev = insert;
size++;
}
/** Unlinks e from the list; the sentinel itself is silently ignored. */
public synchronized void remove(IndexItem e) {
if (e == root || e.equals(root)) {
return;
}
e.prev.next = e.next;
e.next.prev = e.prev;
size--;
}
/**
 * @return clone
 * NOTE(review): the clone shares the same sentinel and nodes as this
 * list, and add() relinks those shared nodes - so "cloning" appears to
 * mutate the original list's structure. Confirm this method is unused
 * before relying on it.
 */
public synchronized Object clone() {
IndexLinkedList clone = new VMIndexLinkedList(this.root);
for (IndexItem e = root.next; e != root; e = e.next) {
clone.add(e);
}
return clone;
}
// in-memory entries are always current, so these are identity operations
public synchronized StoreEntry getEntry(StoreEntry current) {
return current;
}
/**
 * Update the indexes of a StoreEntry
 *
 * @param current
 */
public synchronized StoreEntry refreshEntry(StoreEntry current) {
return current;
}
}

View File

@ -1,341 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import java.io.IOException;
/**
* Bin in a HashIndex
*
*
*/
class HashBin {
// owning index: supplies page creation, release and page I/O
private HashIndex hashIndex;
private int id;
// capacity (entries) of a single page
private int maximumEntries;
// total number of entries across every page of this bin
private int size;
private int numberOfPages =0;
// doubly linked chain of per-page descriptors; root = head, tail = last
private HashPageInfo root = null;
private HashPageInfo tail = null;
/**
 * Constructor
 *
 * @param hashIndex owning index
 * @param id bin number within the index
 * @param maximumEntries entries allowed per page
 */
HashBin(HashIndex hashIndex, int id, int maximumEntries) {
this.hashIndex = hashIndex;
this.id = id;
this.maximumEntries = maximumEntries;
}
public String toString() {
return "HashBin[" + getId() + "]";
}
// equality is by bin id only
public boolean equals(Object o) {
boolean result = false;
if (o instanceof HashBin) {
HashBin other = (HashBin)o;
result = other.id == id;
}
return result;
}
public int hashCode() {
return (int)getId();
}
int getId() {
return id;
}
void setId(int id) {
this.id = id;
}
// NOTE(review): always reports empty regardless of size - looks wrong
// (size == 0 expected); confirm no caller depends on this before fixing.
boolean isEmpty() {
return true;
}
int getMaximumEntries() {
return this.maximumEntries;
}
void setMaximumEntries(int maximumEntries) {
this.maximumEntries = maximumEntries;
}
int size() {
return size;
}
/**
 * Appends a page descriptor for an existing page to this bin's chain.
 *
 * @param id page id of the page
 * @param size number of entries already stored in that page
 * @return the new descriptor, which also becomes the tail
 */
HashPageInfo addHashPageInfo(long id, int size) throws IOException {
HashPageInfo info = new HashPageInfo(hashIndex);
info.setId(id);
info.setSize(size);
if (root == null) {
root=info;
}else {
tail.linkAfter(info);
}
tail=info;
this.numberOfPages++;
this.size += size;
return info;
}
/**
 * Binary-searches the bin (entries are kept sorted) for an entry equal
 * to the key.
 *
 * @return the stored entry, or null when absent
 */
public HashEntry find(HashEntry key) throws IOException {
HashEntry result = null;
try {
int low = 0;
int high = size()-1;
while (low <= high) {
int mid = (low + high) >> 1;
HashEntry te = getHashEntry(mid);
int cmp = te.compareTo(key);
if (cmp == 0) {
result = te;
break;
} else if (cmp < 0) {
low = mid + 1;
} else {
high = mid - 1;
}
}
} finally {
// always release any pages paged in during the search
end();
}
return result;
}
/**
 * Inserts newEntry at its sorted position, or overwrites the index
 * offset of an existing equal entry.
 *
 * @return true when an existing entry was replaced
 */
boolean put(HashEntry newEntry) throws IOException {
boolean replace = false;
try {
int low = 0;
int high = size()-1;
while (low <= high) {
int mid = (low + high) >> 1;
HashEntry midVal = getHashEntry(mid);
int cmp = midVal.compareTo(newEntry);
if (cmp < 0) {
low = mid + 1;
} else if (cmp > 0) {
high = mid - 1;
} else {
replace = true;
midVal.setIndexOffset(newEntry.getIndexOffset());
break;
}
}
if (!replace) {
// low is the insertion point left by the failed binary search
addHashEntry(low, newEntry);
size++;
}
} finally {
end();
}
return replace;
}
/**
 * Removes the entry equal to the argument.
 *
 * @return the removed entry, or null when absent
 */
HashEntry remove(HashEntry entry) throws IOException {
HashEntry result = null;
try {
int low = 0;
int high = size() - 1;
while (low <= high) {
int mid = (low + high) >> 1;
HashEntry te = getHashEntry(mid);
int cmp = te.compareTo(entry);
if (cmp == 0) {
result = te;
removeHashEntry(mid);
size--;
break;
} else if (cmp < 0) {
low = mid + 1;
} else {
high = mid - 1;
}
}
} finally {
end();
}
return result;
}
// inserts at the logical bin position, allocating pages on demand
private void addHashEntry(int index, HashEntry entry) throws IOException {
HashPageInfo pageToUse = null;
int offset = 0;
if (index >= getMaximumBinSize()) {
// beyond current capacity: grow the page chain until index fits
while(index >= getMaximumBinSize()) {
HashPage hp = hashIndex.createPage(id);
pageToUse = addHashPageInfo(hp.getId(), 0);
pageToUse.setPage(hp);
}
offset = 0;
} else {
// walk the chain to find the page (and in-page offset) for index
int count = 0;
int countSoFar=0;
int pageNo = 0;
HashPageInfo page = root;
while (page != null) {
count += page.size();
pageToUse=page;
if (index < count ) {
offset = index - countSoFar;
break;
}
if (index == count && page.size()+1 <= maximumEntries) {
// appending right after this page's last entry still fits
offset = page.size();
break;
}
countSoFar += page.size();
pageNo++;
page = (HashPageInfo) page.getNext();
}
while(pageNo >= this.numberOfPages) {
HashPage hp = hashIndex.createPage(id);
pageToUse = addHashPageInfo(hp.getId(), 0);
}
}
pageToUse.begin();
pageToUse.addHashEntry(offset, entry);
// spill trailing entries into following pages if this page overfilled
doOverFlow(index);
}
// removes the entry at the logical position, releasing emptied pages
private HashEntry removeHashEntry(int index) throws IOException {
HashPageInfo page = getRetrievePage(index);
int offset = getRetrieveOffset(index);
HashEntry result = page.removeHashEntry(offset);
if (page.isEmpty()) {
if (root.equals(page)) {
root=(HashPageInfo) root.getNext();
}
if (tail.equals(page)) {
tail=(HashPageInfo) page.getPrevious();
}
page.unlink();
this.numberOfPages--;
hashIndex.releasePage(page.getPage());
}
doUnderFlow(index);
return result;
}
// reads the entry at the logical position; page stays in until end()
private HashEntry getHashEntry(int index) throws IOException {
HashPageInfo page = getRetrievePage(index);
page.begin();
int offset = getRetrieveOffset(index);
HashEntry result = page.getHashEntry(offset);
return result;
}
// theoretical capacity of the bin given its current page count
private int getMaximumBinSize() {
return maximumEntries * this.numberOfPages;
}
// page descriptor whose range covers the logical index; pages it in
private HashPageInfo getRetrievePage(int index) throws IOException {
HashPageInfo result = null;
int count = 0;
HashPageInfo page = root;
while (page != null) {
count += page.size();
result = page;
if (index < count) {
break;
}
page = (HashPageInfo) page.getNext();
}
result.begin();
return result;
}
// offset of the logical index within its containing page
private int getRetrieveOffset(int index) throws IOException {
int result = 0;
int count = 0;
HashPageInfo page = root;
while (page != null) {
if ((index + 1) <= (count + page.size())) {
result = index - count;
break;
}
count += page.size();
page = (HashPageInfo) page.getNext();
}
return result;
}
// cascades the last entry of an over-full page into the following page
private void doOverFlow(int index) throws IOException {
HashPageInfo info = getRetrievePage(index);
if (info.size() > maximumEntries) {
// overflowed
info.begin();
HashEntry entry = info.removeHashEntry(info.size() - 1);
doOverFlow(getNextPage(info), entry);
}
}
// recursive half of the overflow cascade; allocates a tail page if needed
private void doOverFlow(HashPageInfo next, HashEntry entry) throws IOException {
HashPageInfo info = null;
if (next == null) {
// no following page - allocate a fresh one at the tail
HashPage page = hashIndex.createPage(id);
info = addHashPageInfo(page.getId(), 0);
info.setPage(page);
} else {
info = next;
}
info.begin();
info.addHashEntry(0, entry);
if (info.size() > maximumEntries) {
// overflowed
HashEntry overflowed = info.removeHashEntry(info.size() - 1);
doOverFlow(getNextPage(info), overflowed);
}
}
private HashPageInfo getNextPage(HashPageInfo start) {
return (HashPageInfo) start.getNext();
}
// intentional no-op: pages are only reclaimed once fully emptied
private void doUnderFlow(int index) {
}
// debug helper: page count plus the dump of every page in the chain
String dump() throws IOException {
String str = "[" + this.numberOfPages+"]";
HashPageInfo page = root;
while (page != null) {
page.begin();
str +=page.dump();
page.end();
page = (HashPageInfo) page.getNext();
}
return str;
}
// releases (pages out) every paged-in page of this bin
private void end() throws IOException {
HashPageInfo page = root;
while (page != null) {
page.end();
page = (HashPageInfo) page.getNext();
}
}
}

View File

@ -1,100 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.kaha.Marshaller;
/**
* Key and index for DiskBased Hash Index
*
*
*/
/**
 * Key plus index-record offset entry for the disk-based hash index.
 * Ordering, equality and hashing are all delegated to the user key.
 */
class HashEntry implements Comparable {
    static final int NOT_SET = -1;
    private Comparable key;
    private long indexOffset;
    /**
     * Compares by key: a HashEntry argument is compared key-to-key, any
     * other argument is compared directly against this entry's key.
     */
    public int compareTo(Object o) {
        if (o instanceof HashEntry) {
            HashEntry other = (HashEntry)o;
            return key.compareTo(other.key);
        } else {
            return key.compareTo(o);
        }
    }
    /**
     * Consistent with compareTo. Fixed to return false for a null
     * argument, as required by the Object.equals contract, instead of
     * propagating a NullPointerException out of Comparable.compareTo.
     */
    public boolean equals(Object o) {
        return o != null && compareTo(o) == 0;
    }
    public int hashCode() {
        return key.hashCode();
    }
    public String toString() {
        return "HashEntry(" + key + "," + indexOffset + ")";
    }
    /** @return a shallow copy (the key reference is shared) */
    HashEntry copy() {
        HashEntry copy = new HashEntry();
        copy.key = this.key;
        copy.indexOffset = this.indexOffset;
        return copy;
    }
    /**
     * @return the key
     */
    Comparable getKey() {
        return this.key;
    }
    /**
     * @param key the key to set
     */
    void setKey(Comparable key) {
        this.key = key;
    }
    /**
     * @return the indexOffset
     */
    long getIndexOffset() {
        return this.indexOffset;
    }
    /**
     * @param indexOffset the indexOffset to set
     */
    void setIndexOffset(long indexOffset) {
        this.indexOffset = indexOffset;
    }
    /** Serializes the offset then the key via the supplied marshaller. */
    void write(Marshaller keyMarshaller, DataOutput dataOut) throws IOException {
        dataOut.writeLong(indexOffset);
        keyMarshaller.writePayload(key, dataOut);
    }
    /** Reads the offset then the key, mirroring {@link #write}. */
    void read(Marshaller keyMarshaller, DataInput dataIn) throws IOException {
        indexOffset = dataIn.readLong();
        key = (Comparable)keyMarshaller.readPayload(dataIn);
    }
}

View File

@ -1,617 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.LinkedList;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.impl.index.Index;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.LRUCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* BTree implementation
*
*
*/
public class HashIndex implements Index, HashIndexMBean {
public static final int DEFAULT_PAGE_SIZE;
public static final int DEFAULT_KEY_SIZE;
public static final int DEFAULT_BIN_SIZE;
public static final int MAXIMUM_CAPACITY;
public static final int DEFAULT_LOAD_FACTOR;
private static final int LOW_WATER_MARK=1024*16;
private static final String NAME_PREFIX = "hash-index-";
private static final Logger LOG = LoggerFactory.getLogger(HashIndex.class);
private final String name;
private File directory;
private File file;
private RandomAccessFile indexFile;
private IndexManager indexManager;
private int pageSize = DEFAULT_PAGE_SIZE;
private int keySize = DEFAULT_KEY_SIZE;
private int numberOfBins = DEFAULT_BIN_SIZE;
private int keysPerPage = this.pageSize /this.keySize;
private DataByteArrayInputStream dataIn;
private DataByteArrayOutputStream dataOut;
private byte[] readBuffer;
private HashBin[] bins;
private Marshaller keyMarshaller;
private long length;
private LinkedList<HashPage> freeList = new LinkedList<HashPage>();
private AtomicBoolean loaded = new AtomicBoolean();
private LRUCache<Long, HashPage> pageCache;
private boolean enablePageCaching=false;//this is off by default - see AMQ-1667
private int pageCacheSize = 10;
private int size;
private int highestSize=0;
private int activeBins;
private int threshold;
private int maximumCapacity=MAXIMUM_CAPACITY;
private int loadFactor=DEFAULT_LOAD_FACTOR;
/**
* Constructor
*
* @param directory
* @param name
* @param indexManager
* @throws IOException
*/
public HashIndex(File directory, String name, IndexManager indexManager) throws IOException {
this.directory = directory;
this.name = name;
this.indexManager = indexManager;
openIndexFile();
pageCache = new LRUCache<Long, HashPage>(pageCacheSize, pageCacheSize, 0.75f, true);
}
/**
* Set the marshaller for key objects
*
* @param marshaller
*/
public synchronized void setKeyMarshaller(Marshaller marshaller) {
this.keyMarshaller = marshaller;
}
/**
* @return the keySize
*/
public synchronized int getKeySize() {
return this.keySize;
}
/**
* @param keySize the keySize to set
*/
public synchronized void setKeySize(int keySize) {
this.keySize = keySize;
if (loaded.get()) {
throw new RuntimeException("Pages already loaded - can't reset key size");
}
}
/**
* @return the pageSize
*/
public synchronized int getPageSize() {
return this.pageSize;
}
/**
* @param pageSize the pageSize to set
*/
public synchronized void setPageSize(int pageSize) {
if (loaded.get() && pageSize != this.pageSize) {
throw new RuntimeException("Pages already loaded - can't reset page size");
}
this.pageSize = pageSize;
}
/**
* @return number of bins
*/
public int getNumberOfBins() {
return this.numberOfBins;
}
/**
* @param numberOfBins
*/
public void setNumberOfBins(int numberOfBins) {
if (loaded.get() && numberOfBins != this.numberOfBins) {
throw new RuntimeException("Pages already loaded - can't reset bin size");
}
this.numberOfBins = numberOfBins;
}
/**
* @return the enablePageCaching
*/
public synchronized boolean isEnablePageCaching() {
return this.enablePageCaching;
}
/**
* @param enablePageCaching the enablePageCaching to set
*/
public synchronized void setEnablePageCaching(boolean enablePageCaching) {
this.enablePageCaching = enablePageCaching;
}
/**
* @return the pageCacheSize
*/
public synchronized int getPageCacheSize() {
return this.pageCacheSize;
}
/**
* @param pageCacheSize the pageCacheSize to set
*/
public synchronized void setPageCacheSize(int pageCacheSize) {
this.pageCacheSize = pageCacheSize;
pageCache.setMaxCacheSize(pageCacheSize);
}
public synchronized boolean isTransient() {
return false;
}
/**
* @return the threshold
*/
public int getThreshold() {
return threshold;
}
/**
* @param threshold the threshold to set
*/
public void setThreshold(int threshold) {
this.threshold = threshold;
}
/**
* @return the loadFactor
*/
public int getLoadFactor() {
return loadFactor;
}
/**
* @param loadFactor the loadFactor to set
*/
public void setLoadFactor(int loadFactor) {
this.loadFactor = loadFactor;
}
/**
* @return the maximumCapacity
*/
public int getMaximumCapacity() {
return maximumCapacity;
}
/**
* @param maximumCapacity the maximumCapacity to set
*/
public void setMaximumCapacity(int maximumCapacity) {
this.maximumCapacity = maximumCapacity;
}
public synchronized int getSize() {
return size;
}
public synchronized int getActiveBins(){
return activeBins;
}
public synchronized void load() {
if (loaded.compareAndSet(false, true)) {
int capacity = 1;
while (capacity < numberOfBins) {
capacity <<= 1;
}
this.bins = new HashBin[capacity];
this.numberOfBins=capacity;
threshold = calculateThreashold();
keysPerPage = pageSize / keySize;
dataIn = new DataByteArrayInputStream();
dataOut = new DataByteArrayOutputStream(pageSize);
readBuffer = new byte[pageSize];
try {
openIndexFile();
if (indexFile.length() > 0) {
doCompress();
}
} catch (IOException e) {
LOG.error("Failed to load index ", e);
throw new RuntimeException(e);
}
}
}
public synchronized void unload() throws IOException {
if (loaded.compareAndSet(true, false)) {
if (indexFile != null) {
indexFile.close();
indexFile = null;
freeList.clear();
pageCache.clear();
bins = new HashBin[bins.length];
}
}
}
public synchronized void store(Object key, StoreEntry value) throws IOException {
load();
HashEntry entry = new HashEntry();
entry.setKey((Comparable)key);
entry.setIndexOffset(value.getOffset());
if (!getBin(key).put(entry)) {
this.size++;
}
if (this.size >= this.threshold) {
resize(2*bins.length);
}
if(this.size > this.highestSize) {
this.highestSize=this.size;
}
}
public synchronized StoreEntry get(Object key) throws IOException {
load();
HashEntry entry = new HashEntry();
entry.setKey((Comparable)key);
HashEntry result = getBin(key).find(entry);
return result != null ? indexManager.getIndex(result.getIndexOffset()) : null;
}
public synchronized StoreEntry remove(Object key) throws IOException {
load();
StoreEntry result = null;
HashEntry entry = new HashEntry();
entry.setKey((Comparable)key);
HashEntry he = getBin(key).remove(entry);
if (he != null) {
this.size--;
result = this.indexManager.getIndex(he.getIndexOffset());
}
if (this.highestSize > LOW_WATER_MARK && this.highestSize > (this.size *2)) {
int newSize = this.size/this.keysPerPage;
newSize = Math.max(128, newSize);
this.highestSize=0;
resize(newSize);
}
return result;
}
public synchronized boolean containsKey(Object key) throws IOException {
return get(key) != null;
}
public synchronized void clear() throws IOException {
unload();
delete();
openIndexFile();
load();
}
public synchronized void delete() throws IOException {
unload();
if (file.exists()) {
file.delete();
}
length = 0;
}
HashPage lookupPage(long pageId) throws IOException {
HashPage result = null;
if (pageId >= 0) {
result = getFromCache(pageId);
if (result == null) {
result = getFullPage(pageId);
if (result != null) {
if (result.isActive()) {
addToCache(result);
} else {
throw new IllegalStateException("Trying to access an inactive page: " + pageId);
}
}
}
}
return result;
}
HashPage createPage(int binId) throws IOException {
HashPage result = getNextFreePage();
if (result == null) {
// allocate one
result = new HashPage(keysPerPage);
result.setId(length);
result.setBinId(binId);
writePageHeader(result);
length += pageSize;
indexFile.seek(length);
indexFile.write(HashEntry.NOT_SET);
}
addToCache(result);
return result;
}
void releasePage(HashPage page) throws IOException {
removeFromCache(page);
page.reset();
page.setActive(false);
writePageHeader(page);
freeList.add(page);
}
private HashPage getNextFreePage() throws IOException {
HashPage result = null;
if(!freeList.isEmpty()) {
result = freeList.removeFirst();
result.setActive(true);
result.reset();
writePageHeader(result);
}
return result;
}
void writeFullPage(HashPage page) throws IOException {
dataOut.reset();
page.write(keyMarshaller, dataOut);
if (dataOut.size() > pageSize) {
throw new IOException("Page Size overflow: pageSize is " + pageSize + " trying to write " + dataOut.size());
}
indexFile.seek(page.getId());
indexFile.write(dataOut.getData(), 0, dataOut.size());
}
void writePageHeader(HashPage page) throws IOException {
dataOut.reset();
page.writeHeader(dataOut);
indexFile.seek(page.getId());
indexFile.write(dataOut.getData(), 0, HashPage.PAGE_HEADER_SIZE);
}
HashPage getFullPage(long id) throws IOException {
indexFile.seek(id);
indexFile.readFully(readBuffer, 0, pageSize);
dataIn.restart(readBuffer);
HashPage page = new HashPage(keysPerPage);
page.setId(id);
page.read(keyMarshaller, dataIn);
return page;
}
HashPage getPageHeader(long id) throws IOException {
indexFile.seek(id);
indexFile.readFully(readBuffer, 0, HashPage.PAGE_HEADER_SIZE);
dataIn.restart(readBuffer);
HashPage page = new HashPage(keysPerPage);
page.setId(id);
page.readHeader(dataIn);
return page;
}
void addToBin(HashPage page) throws IOException {
int index = page.getBinId();
if (index >= this.bins.length) {
resize(index+1);
}
HashBin bin = getBin(index);
bin.addHashPageInfo(page.getId(), page.getPersistedSize());
}
private HashBin getBin(int index) {
HashBin result = bins[index];
if (result == null) {
result = new HashBin(this, index, pageSize / keySize);
bins[index] = result;
activeBins++;
}
return result;
}
private void openIndexFile() throws IOException {
if (indexFile == null) {
file = new File(directory, NAME_PREFIX + IOHelper.toFileSystemSafeName(name));
IOHelper.mkdirs(file.getParentFile());
indexFile = new RandomAccessFile(file, "rw");
}
}
private HashBin getBin(Object key) {
int hash = hash(key);
int i = indexFor(hash, bins.length);
return getBin(i);
}
private HashPage getFromCache(long pageId) {
HashPage result = null;
if (enablePageCaching) {
result = pageCache.get(pageId);
}
return result;
}
private void addToCache(HashPage page) {
if (enablePageCaching) {
pageCache.put(page.getId(), page);
}
}
private void removeFromCache(HashPage page) {
if (enablePageCaching) {
pageCache.remove(page.getId());
}
}
private void doLoad() throws IOException {
long offset = 0;
if (loaded.compareAndSet(false, true)) {
while ((offset + pageSize) <= indexFile.length()) {
indexFile.seek(offset);
indexFile.readFully(readBuffer, 0, HashPage.PAGE_HEADER_SIZE);
dataIn.restart(readBuffer);
HashPage page = new HashPage(keysPerPage);
page.setId(offset);
page.readHeader(dataIn);
if (!page.isActive()) {
page.reset();
freeList.add(page);
} else {
addToBin(page);
size+=page.size();
}
offset += pageSize;
}
length=offset;
}
}
private void doCompress() throws IOException {
String backFileName = name + "-COMPRESS";
HashIndex backIndex = new HashIndex(directory,backFileName,indexManager);
backIndex.setKeyMarshaller(keyMarshaller);
backIndex.setKeySize(getKeySize());
backIndex.setNumberOfBins(getNumberOfBins());
backIndex.setPageSize(getPageSize());
backIndex.load();
File backFile = backIndex.file;
long offset = 0;
while ((offset + pageSize) <= indexFile.length()) {
indexFile.seek(offset);
HashPage page = getFullPage(offset);
if (page.isActive()) {
for (HashEntry entry : page.getEntries()) {
backIndex.getBin(entry.getKey()).put(entry);
backIndex.size++;
}
}
page=null;
offset += pageSize;
}
backIndex.unload();
unload();
IOHelper.deleteFile(file);
IOHelper.copyFile(backFile, file);
IOHelper.deleteFile(backFile);
openIndexFile();
doLoad();
}
/**
 * Grows the bin array to the next power of two >= {@code newCapacity} by
 * rebuilding the whole index into a temporary "&lt;name&gt;-RESIZE" file and
 * swapping it into place, then reloading. Once the bin array has reached
 * {@code getMaximumCapacity()} the threshold is pushed to Integer.MAX_VALUE
 * so no further resize is ever attempted.
 *
 * @param newCapacity requested bin count (rounded up to a power of two so
 *                    that indexFor() masking stays valid)
 * @throws IOException on failure reading or rewriting the index files
 */
private void resize(int newCapacity) throws IOException {
    if (bins.length < getMaximumCapacity()) {
        if (newCapacity != numberOfBins) {
            // round the request up to a power of two
            int capacity = 1;
            while (capacity < newCapacity) {
                capacity <<= 1;
            }
            newCapacity = capacity;
            if (newCapacity != numberOfBins) {
                LOG.info("Resize hash bins " + this.name + " from " + numberOfBins + " to " + newCapacity);
                // fixed temp-file suffix typo: was "-REISZE"
                String backFileName = name + "-RESIZE";
                HashIndex backIndex = new HashIndex(directory, backFileName, indexManager);
                backIndex.setKeyMarshaller(keyMarshaller);
                backIndex.setKeySize(getKeySize());
                backIndex.setNumberOfBins(newCapacity);
                backIndex.setPageSize(getPageSize());
                backIndex.load();
                File backFile = backIndex.file;
                long offset = 0;
                // replay every active entry into the rebuilt index
                while ((offset + pageSize) <= indexFile.length()) {
                    indexFile.seek(offset);
                    HashPage page = getFullPage(offset);
                    if (page.isActive()) {
                        for (HashEntry entry : page.getEntries()) {
                            backIndex.getBin(entry.getKey()).put(entry);
                            backIndex.size++;
                        }
                    }
                    page = null;
                    offset += pageSize;
                }
                backIndex.unload();
                unload();
                // swap the rebuilt file into place and reload
                IOHelper.deleteFile(file);
                IOHelper.copyFile(backFile, file);
                IOHelper.deleteFile(backFile);
                setNumberOfBins(newCapacity);
                bins = new HashBin[newCapacity];
                threshold = calculateThreashold();
                openIndexFile();
                doLoad();
            }
        }
    } else {
        // at maximum capacity: disable any further resizing
        threshold = Integer.MAX_VALUE;
        return;
    }
}
// Entry-count threshold that triggers a resize: on average `loadFactor`
// entries per bin (loadFactor comes from the "defaultLoadFactor" system
// property, default 50 — see the static initializer). The misspelled name
// ("Threashold") is kept because resize() calls it by this name.
private int calculateThreashold() {
return (int)(bins.length * loadFactor);
}
/**
 * @return a debug identifier combining the identity hash code with the
 *         backing index file's name
 */
public String toString() {
    return "HashIndex" + System.identityHashCode(this) + ": " + file.getName();
}
/**
 * Supplemental hash: scrambles the object's raw hashCode so that the
 * power-of-two masking in {@link #indexFor(int, int)} also sees the
 * high-order bits. The exact shift/add/xor sequence is load-bearing —
 * do not reorder.
 *
 * @param x the key object (must not be null)
 * @return the spread hash value
 */
static int hash(Object x) {
    int code = x.hashCode();
    code += ~(code << 9);
    code ^= code >>> 14;
    code += code << 4;
    code ^= code >>> 10;
    return code;
}
/**
 * Maps a hash value into a bin index for a power-of-two table size by
 * masking off the high bits.
 *
 * @param h      (supplemental) hash value
 * @param length table size — must be a power of two
 * @return index in the range [0, length)
 */
static int indexFor(int h, int length) {
    return h & (length - 1);
}
// Tuning defaults, read once at class-load time from system properties so
// deployments can override them without code changes.
static {
DEFAULT_PAGE_SIZE = Integer.parseInt(System.getProperty("defaultPageSize", "1024"));
DEFAULT_KEY_SIZE = Integer.parseInt(System.getProperty("defaultKeySize", "96"));
DEFAULT_BIN_SIZE= Integer.parseInt(System.getProperty("defaultBinSize", "1024"));
MAXIMUM_CAPACITY = Integer.parseInt(System.getProperty("maximumCapacity", "16384"));
DEFAULT_LOAD_FACTOR=Integer.parseInt(System.getProperty("defaultLoadFactor","50"));
}
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import org.apache.activemq.kaha.IndexMBean;
/**
 * JMX management view of a HashIndex: exposes its sizing/tuning attributes
 * and basic statistics.
 */
public interface HashIndexMBean extends IndexMBean{
/**
 * @return the keySize — bytes reserved for each marshalled key
 */
public int getKeySize();
/**
 * @param keySize the keySize to set (bytes per marshalled key)
 */
public void setKeySize(int keySize);
/**
 * @return the page size in bytes
 */
public int getPageSize();
/**
 * @return number of bins (hash buckets)
 */
public int getNumberOfBins();
/**
 * @return the enablePageCaching flag
 */
public boolean isEnablePageCaching();
/**
 * @return the pageCacheSize — maximum pages held in the LRU page cache
 */
public int getPageCacheSize();
/**
 * @return size — number of entries in the index
 */
public int getSize();
/**
 * @return the number of active bins
 */
public int getActiveBins();
}

View File

@ -1,223 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.activemq.kaha.Marshaller;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A fixed-size page of HashEntry records stored in a HashIndex file.
 * Each page belongs to one bin and knows whether it is active or sitting
 * on the index's free list. (Previous javadoc said "A Page within a
 * HashPage", which was self-referential.)
 */
class HashPage {
// header layout: 1-byte active flag + 8-byte nextFreePageId
// + 4-byte binId + 4-byte persistedSize = 17 bytes
static final int PAGE_HEADER_SIZE = 17;
private static final transient Logger LOG = LoggerFactory.getLogger(HashPage.class);
private int maximumEntries;
// file offset of this page — doubles as its identity (see equals/hashCode)
private long id;
private int binId;
private List<HashEntry> hashIndexEntries;
// entry count as recorded in the persisted header (may lag the live list)
private int persistedSize;
/*
 * for persistence only
 */
private long nextFreePageId = HashEntry.NOT_SET;
private boolean active = true;
/**
 * Constructor
 *
 * @param maximumEntries capacity of this page in entries
 */
public HashPage(int maximumEntries) {
this.maximumEntries = maximumEntries;
this.hashIndexEntries = new ArrayList<HashEntry>(maximumEntries);
}
public String toString() {
return "HashPage[" + getId() + ":" + binId + ":" + id+"] size = " + persistedSize;
}
// Identity is the page's file offset only.
public boolean equals(Object o) {
boolean result = false;
if (o instanceof HashPage) {
HashPage other = (HashPage)o;
result = other.id == id;
}
return result;
}
public int hashCode() {
return (int)id;
}
boolean isActive() {
return this.active;
}
void setActive(boolean active) {
this.active = active;
}
long getId() {
return id;
}
void setId(long id) {
this.id = id;
}
int getPersistedSize() {
return persistedSize;
}
// Serializes header, entry count, then each entry via the key marshaller.
void write(Marshaller keyMarshaller, DataOutput dataOut) throws IOException {
persistedSize=hashIndexEntries.size();
writeHeader(dataOut);
dataOut.writeInt(persistedSize);
for (HashEntry entry : hashIndexEntries) {
entry.write(keyMarshaller, dataOut);
}
}
// Deserializes a full page. The bare readInt() consumes the entry count
// written by write(); the count actually used is persistedSize from the
// header, which writeHeader() sets to the same value.
void read(Marshaller keyMarshaller, DataInput dataIn) throws IOException {
readHeader(dataIn);
dataIn.readInt();
int size = persistedSize;
hashIndexEntries.clear();
for (int i = 0; i < size; i++) {
HashEntry entry = new HashEntry();
entry.read(keyMarshaller, dataIn);
hashIndexEntries.add(entry);
}
}
// Field order must match writeHeader().
void readHeader(DataInput dataIn) throws IOException {
active = dataIn.readBoolean();
nextFreePageId = dataIn.readLong();
binId = dataIn.readInt();
persistedSize = dataIn.readInt();
}
void writeHeader(DataOutput dataOut) throws IOException {
dataOut.writeBoolean(isActive());
dataOut.writeLong(nextFreePageId);
dataOut.writeInt(binId);
persistedSize=hashIndexEntries.size();
dataOut.writeInt(persistedSize);
}
boolean isEmpty() {
return hashIndexEntries.isEmpty();
}
boolean isFull() {
return hashIndexEntries.size() >= maximumEntries;
}
boolean isUnderflowed() {
return hashIndexEntries.size() < (maximumEntries / 2);
}
boolean isOverflowed() {
return hashIndexEntries.size() > maximumEntries;
}
List<HashEntry> getEntries() {
return hashIndexEntries;
}
void setEntries(List<HashEntry> newEntries) {
this.hashIndexEntries = newEntries;
}
int getMaximumEntries() {
return this.maximumEntries;
}
void setMaximumEntries(int maximumEntries) {
this.maximumEntries = maximumEntries;
}
int size() {
return hashIndexEntries.size();
}
// Empties the page; the on-disk image is only updated when rewritten.
void reset() throws IOException {
hashIndexEntries.clear();
persistedSize=0;
}
void addHashEntry(int index, HashEntry entry) throws IOException {
hashIndexEntries.add(index, entry);
}
HashEntry getHashEntry(int index) {
HashEntry result = hashIndexEntries.get(index);
return result;
}
HashEntry removeHashEntry(int index) throws IOException {
HashEntry result = hashIndexEntries.remove(index);
return result;
}
void removeAllTreeEntries(List<HashEntry> c) {
hashIndexEntries.removeAll(c);
}
// Returns a detached copy of [from, to) so callers can mutate freely.
List<HashEntry> getSubList(int from, int to) {
return new ArrayList<HashEntry>(hashIndexEntries.subList(from, to));
}
/**
 * @return the binId
 */
int getBinId() {
return this.binId;
}
/**
 * @param binId the binId to set
 */
void setBinId(int binId) {
this.binId = binId;
}
// Debug helper: page summary followed by every entry.
String dump() {
StringBuffer str = new StringBuffer(32);
str.append(toString());
str.append(": ");
for (HashEntry entry : hashIndexEntries) {
str.append(entry);
str.append(",");
}
return str.toString();
}
}

View File

@ -1,121 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.hash;
import java.io.IOException;
import org.apache.activemq.util.LinkedNode;
/**
 * Lazy-loading holder for a HashPage: tracks the page's file id and entry
 * count, loads the page from the HashIndex on begin() and writes it back on
 * end() when dirty. Extends LinkedNode so bins can chain their page infos.
 * (Previous javadoc said "A Page within a HashPageInfo", which was
 * self-referential.)
 */
class HashPageInfo extends LinkedNode{
private HashIndex hashIndex;
private long id;
private int size;
// resident page, null when not loaded — see begin()/end()
private HashPage page;
// set when the in-memory page diverges from its on-disk image
private boolean dirty;
HashPageInfo(HashIndex index) {
this.hashIndex = index;
}
/**
 * @return the id (file offset of the tracked page)
 */
long getId() {
return this.id;
}
/**
 * @param id the id to set
 */
void setId(long id) {
this.id = id;
}
/**
 * @return the cached entry count
 */
int size() {
return this.size;
}
boolean isEmpty() {
return size <= 0;
}
/**
 * @param size the size to set
 */
void setSize(int size) {
this.size = size;
}
// The three entry mutators below require begin() to have loaded the page.
void addHashEntry(int index, HashEntry entry) throws IOException {
page.addHashEntry(index, entry);
size=page.size();
dirty = true;
}
HashEntry getHashEntry(int index) throws IOException {
return page.getHashEntry(index);
}
HashEntry removeHashEntry(int index) throws IOException {
HashEntry result = page.removeHashEntry(index);
if (result != null) {
size=page.size();
dirty = true;
}
return result;
}
String dump() {
return page.dump();
}
// Loads the page if not already resident; pair with end().
void begin() throws IOException {
if (page == null) {
page = hashIndex.lookupPage(id);
}
}
// Flushes the page when dirty, then releases the in-memory reference.
void end() throws IOException {
if (page != null) {
if (dirty) {
hashIndex.writeFullPage(page);
}
}
page = null;
dirty = false;
}
HashPage getPage() {
return page;
}
void setPage(HashPage page) {
this.page = page;
}
public String toString() {
return "Page["+id+"] size=" + size;
}
}

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
Disk-based hash implementation of an index for a Map.
</body>
</html>

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
Kaha index type classes for the Map container, including the VM implementation.
</body>
</html>

View File

@ -1,146 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.tree;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.kaha.Marshaller;
/**
* Key and index for a BTree
*
*
*/
class TreeEntry implements Comparable {
static final int NOT_SET = -1;
// NOTE: raw Comparable kept — the on-disk format and callers predate generics
private Comparable key;
// offset of the value record managed by the IndexManager
private long indexOffset;
// child page links; NOT_SET means no child on that side
private long prevPageId = NOT_SET;
private long nextPageId = NOT_SET;
// Orders entries by key; also accepts a bare key object for lookups.
public int compareTo(Object o) {
if (o instanceof TreeEntry) {
TreeEntry other = (TreeEntry)o;
return key.compareTo(other.key);
} else {
return key.compareTo(o);
}
}
// equals is defined purely in terms of key ordering (compareTo == 0).
public boolean equals(Object o) {
return compareTo(o) == 0;
}
public int hashCode() {
return key.hashCode();
}
public String toString() {
return "TreeEntry(" + key + "," + indexOffset + ")prev=" + prevPageId + ",next=" + nextPageId;
}
// Clears only the child links; key and indexOffset are preserved.
void reset() {
prevPageId = NOT_SET;
nextPageId = NOT_SET;
}
// Shallow copy — the key reference is shared with the original.
TreeEntry copy() {
TreeEntry copy = new TreeEntry();
copy.key = this.key;
copy.indexOffset = this.indexOffset;
copy.prevPageId = this.prevPageId;
copy.nextPageId = this.nextPageId;
return copy;
}
/**
 * @return the key
 */
Comparable getKey() {
return this.key;
}
/**
 * @param key the key to set
 */
void setKey(Comparable key) {
this.key = key;
}
/**
 * @return the nextPageId
 */
long getNextPageId() {
return this.nextPageId;
}
/**
 * @param nextPageId the nextPageId to set
 */
void setNextPageId(long nextPageId) {
this.nextPageId = nextPageId;
}
/**
 * @return the prevPageId
 */
long getPrevPageId() {
return this.prevPageId;
}
/**
 * @param prevPageId the prevPageId to set
 */
void setPrevPageId(long prevPageId) {
this.prevPageId = prevPageId;
}
/**
 * @return the indexOffset
 */
long getIndexOffset() {
return this.indexOffset;
}
/**
 * @param indexOffset the indexOffset to set
 */
void setIndexOffset(long indexOffset) {
this.indexOffset = indexOffset;
}
boolean hasChildPagesReferences() {
return prevPageId != NOT_SET || nextPageId != NOT_SET;
}
// Field order must match read(): key, indexOffset, nextPageId, prevPageId.
void write(Marshaller keyMarshaller, DataOutput dataOut) throws IOException {
keyMarshaller.writePayload(key, dataOut);
dataOut.writeLong(indexOffset);
dataOut.writeLong(nextPageId);
dataOut.writeLong(prevPageId);
}
void read(Marshaller keyMarshaller, DataInput dataIn) throws IOException {
key = (Comparable)keyMarshaller.readPayload(dataIn);
indexOffset = dataIn.readLong();
nextPageId = dataIn.readLong();
prevPageId = dataIn.readLong();
}
}

View File

@ -1,420 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.tree;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.activemq.kaha.Marshaller;
import org.apache.activemq.kaha.StoreEntry;
import org.apache.activemq.kaha.impl.index.Index;
import org.apache.activemq.kaha.impl.index.IndexManager;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.LRUCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* BTree implementation
*
*
*/
public class TreeIndex implements Index {
private static final String NAME_PREFIX = "tree-index-";
private static final int DEFAULT_PAGE_SIZE;
private static final int DEFAULT_KEY_SIZE;
private static final Logger LOG = LoggerFactory.getLogger(TreeIndex.class);
private final String name;
private File directory;
private File file;
// the index is a single file of fixed-size pages addressed by file offset
private RandomAccessFile indexFile;
private IndexManager indexManager;
private int pageSize = DEFAULT_PAGE_SIZE;
private int keySize = DEFAULT_KEY_SIZE;
private int keysPerPage = pageSize / keySize;
private TreePage root;
private LRUCache<Long, TreePage> pageCache;
private DataByteArrayInputStream dataIn;
private DataByteArrayOutputStream dataOut;
private byte[] readBuffer;
private Marshaller keyMarshaller;
// current file length == offset one past the last allocated page
private long length;
// free page list, chained on disk via TreePage.nextFreePageId
private TreePage firstFree;
private TreePage lastFree;
private AtomicBoolean loaded = new AtomicBoolean();
private boolean enablePageCaching = true;
private int pageCacheSize = 10;
/**
 * Constructor
 *
 * @param directory storage directory for the index file
 * @param name logical index name (sanitized into the file name)
 * @param indexManager resolves entry offsets to StoreEntry records
 * @throws IOException if the index file cannot be opened
 */
public TreeIndex(File directory, String name, IndexManager indexManager) throws IOException {
this.directory = directory;
this.name = name;
this.indexManager = indexManager;
pageCache = new LRUCache<Long, TreePage>(pageCacheSize, pageCacheSize, 0.75f, true);
openIndexFile();
}
/**
 * Set the marshaller for key objects
 *
 * @param marshaller
 */
public void setKeyMarshaller(Marshaller marshaller) {
this.keyMarshaller = marshaller;
}
/**
 * @return the keySize (bytes per marshalled key; pageSize / keySize
 *         determines keysPerPage)
 */
public int getKeySize() {
return this.keySize;
}
/**
 * @param keySize the keySize to set
 */
public void setKeySize(int keySize) {
// NOTE(review): the field is assigned before the loaded check, unlike
// setPageSize() which checks first — so a late call still mutates state.
this.keySize = keySize;
if (loaded.get()) {
throw new RuntimeException("Pages already loaded - can't reset key size");
}
}
/**
 * @return the pageSize
 */
public int getPageSize() {
return this.pageSize;
}
/**
 * @param pageSize the pageSize to set
 */
public void setPageSize(int pageSize) {
if (loaded.get() && pageSize != this.pageSize) {
throw new RuntimeException("Pages already loaded - can't reset page size");
}
this.pageSize = pageSize;
}
public boolean isTransient() {
return false;
}
/**
 * @return the enablePageCaching
 */
public boolean isEnablePageCaching() {
return this.enablePageCaching;
}
/**
 * @param enablePageCaching the enablePageCaching to set
 */
public void setEnablePageCaching(boolean enablePageCaching) {
this.enablePageCaching = enablePageCaching;
}
/**
 * @return the pageCacheSize
 */
public int getPageCacheSize() {
return this.pageCacheSize;
}
/**
 * @param pageCacheSize the pageCacheSize to set
 */
public void setPageCacheSize(int pageCacheSize) {
this.pageCacheSize = pageCacheSize;
pageCache.setMaxCacheSize(pageCacheSize);
}
// One-time load (guarded by `loaded`): scans page headers, rebuilds the
// on-disk free-page chain, locates the root page, and creates a fresh root
// when none exists. IOExceptions are wrapped in RuntimeException.
public void load() {
if (loaded.compareAndSet(false, true)) {
keysPerPage = pageSize / keySize;
dataIn = new DataByteArrayInputStream();
dataOut = new DataByteArrayOutputStream(pageSize);
readBuffer = new byte[pageSize];
try {
openIndexFile();
long offset = 0;
while ((offset + pageSize) <= indexFile.length()) {
indexFile.seek(offset);
indexFile.readFully(readBuffer, 0, TreePage.PAGE_HEADER_SIZE);
dataIn.restart(readBuffer);
TreePage page = new TreePage(keysPerPage);
page.setTree(this);
page.setId(offset);
page.readHeader(dataIn);
if (!page.isActive()) {
// append this free page to the chain and persist the link
if (lastFree != null) {
lastFree.setNextFreePageId(offset);
indexFile.seek(lastFree.getId());
dataOut.reset();
lastFree.writeHeader(dataOut);
indexFile.write(dataOut.getData(), 0, TreePage.PAGE_HEADER_SIZE);
lastFree = page;
} else {
lastFree = page;
firstFree = page;
}
} else if (root == null && page.isRoot()) {
root = getFullPage(offset);
}
offset += pageSize;
}
length = offset;
if (root == null) {
root = createRoot();
}
} catch (IOException e) {
LOG.error("Failed to load index ", e);
throw new RuntimeException(e);
}
}
}
// Closes the file and drops all in-memory state; safe to call repeatedly.
public void unload() throws IOException {
if (loaded.compareAndSet(true, false)) {
if (indexFile != null) {
indexFile.close();
indexFile = null;
pageCache.clear();
root = null;
firstFree = null;
lastFree = null;
}
}
}
// Inserts/overwrites the key's mapping; stores only the entry's file offset.
public void store(Object key, StoreEntry value) throws IOException {
TreeEntry entry = new TreeEntry();
entry.setKey((Comparable)key);
entry.setIndexOffset(value.getOffset());
root.put(entry);
}
public StoreEntry get(Object key) throws IOException {
TreeEntry entry = new TreeEntry();
entry.setKey((Comparable)key);
TreeEntry result = root.find(entry);
return result != null ? indexManager.getIndex(result.getIndexOffset()) : null;
}
public StoreEntry remove(Object key) throws IOException {
TreeEntry entry = new TreeEntry();
entry.setKey((Comparable)key);
TreeEntry result = root.remove(entry);
return result != null ? indexManager.getIndex(result.getIndexOffset()) : null;
}
public boolean containsKey(Object key) throws IOException {
TreeEntry entry = new TreeEntry();
entry.setKey((Comparable)key);
return root.find(entry) != null;
}
// Drops all data by deleting and recreating the backing file.
public void clear() throws IOException {
unload();
delete();
openIndexFile();
load();
}
public void delete() throws IOException {
unload();
if (file.exists()) {
// NOTE(review): the deletion result is ignored; a failed delete is silent
boolean result = file.delete();
}
length = 0;
}
/**
 * @return the root
 */
TreePage getRoot() {
return this.root;
}
// Resolves a page id to a page, preferring root then cache then disk;
// an inactive page here indicates corruption or a stale reference.
TreePage lookupPage(long pageId) throws IOException {
TreePage result = null;
if (pageId >= 0) {
if (root != null && root.getId() == pageId) {
result = root;
} else {
result = getFromCache(pageId);
}
if (result == null) {
result = getFullPage(pageId);
if (result != null) {
if (result.isActive()) {
addToCache(result);
} else {
throw new IllegalStateException("Trying to access an inactive page: " + pageId + " root is " + root);
}
}
}
}
return result;
}
TreePage createRoot() throws IOException {
TreePage result = createPage(-1);
root = result;
return result;
}
// Reuses a free page if available, otherwise appends a new page at `length`.
TreePage createPage(long parentId) throws IOException {
TreePage result = getNextFreePage();
if (result == null) {
// allocate one
result = new TreePage(keysPerPage);
result.setId(length);
result.setTree(this);
result.setParentId(parentId);
writePage(result);
length += pageSize;
indexFile.seek(length);
// write(int) emits a single byte (low 8 bits of NOT_SET) —
// presumably an end-of-file marker; TODO confirm
indexFile.write(TreeEntry.NOT_SET);
}
addToCache(result);
return result;
}
// Returns a page to the free list and persists both the page and the
// previous tail's updated chain link.
void releasePage(TreePage page) throws IOException {
removeFromCache(page);
page.reset();
page.setActive(false);
if (lastFree == null) {
firstFree = page;
lastFree = page;
} else {
lastFree.setNextFreePageId(page.getId());
writePage(lastFree);
}
writePage(page);
}
// Pops the head of the free list, reactivating and resetting it.
private TreePage getNextFreePage() throws IOException {
TreePage result = null;
if (firstFree != null) {
if (firstFree.equals(lastFree)) {
result = firstFree;
firstFree = null;
lastFree = null;
} else {
result = firstFree;
firstFree = getPage(firstFree.getNextFreePageId());
if (firstFree == null) {
lastFree = null;
}
}
result.setActive(true);
result.reset();
result.saveHeader();
}
return result;
}
// Serializes the whole page (header + entries); guards against the
// marshalled form exceeding the fixed page size.
void writeFullPage(TreePage page) throws IOException {
dataOut.reset();
page.write(keyMarshaller, dataOut);
if (dataOut.size() > pageSize) {
throw new IOException("Page Size overflow: pageSize is " + pageSize + " trying to write " + dataOut.size());
}
indexFile.seek(page.getId());
indexFile.write(dataOut.getData(), 0, dataOut.size());
}
// Writes only the fixed-size header in place.
void writePage(TreePage page) throws IOException {
dataOut.reset();
page.writeHeader(dataOut);
indexFile.seek(page.getId());
indexFile.write(dataOut.getData(), 0, TreePage.PAGE_HEADER_SIZE);
}
// Reads header + all entries of the page at the given offset.
TreePage getFullPage(long id) throws IOException {
indexFile.seek(id);
indexFile.readFully(readBuffer, 0, pageSize);
dataIn.restart(readBuffer);
TreePage page = new TreePage(keysPerPage);
page.setId(id);
page.setTree(this);
page.read(keyMarshaller, dataIn);
return page;
}
// Reads only the header of the page at the given offset.
TreePage getPage(long id) throws IOException {
indexFile.seek(id);
indexFile.readFully(readBuffer, 0, TreePage.PAGE_HEADER_SIZE);
dataIn.restart(readBuffer);
TreePage page = new TreePage(keysPerPage);
page.setId(id);
page.setTree(this);
page.readHeader(dataIn);
return page;
}
private TreePage getFromCache(long pageId) {
TreePage result = null;
if (enablePageCaching) {
result = pageCache.get(pageId);
}
return result;
}
private void addToCache(TreePage page) {
if (enablePageCaching) {
pageCache.put(page.getId(), page);
}
}
private void removeFromCache(TreePage page) {
if (enablePageCaching) {
pageCache.remove(page.getId());
}
}
// Lazily opens "tree-index-<name>" under the configured directory.
protected void openIndexFile() throws IOException {
if (indexFile == null) {
file = new File(directory, NAME_PREFIX + IOHelper.toFileSystemSafeName(name));
IOHelper.mkdirs(file.getParentFile());
indexFile = new RandomAccessFile(file, "rw");
}
}
// Tuning defaults read once at class-load time from system properties.
static {
DEFAULT_PAGE_SIZE = Integer.parseInt(System.getProperty("defaultPageSize", "16384"));
DEFAULT_KEY_SIZE = Integer.parseInt(System.getProperty("defaultKeySize", "96"));
}
// NOTE(review): entry count is not tracked; always reports 0.
public int getSize() {
return 0;
}
}

View File

@ -1,762 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.tree;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.activemq.kaha.Marshaller;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Page in a BTree
*
*
*/
class TreePage {
static final int PAGE_HEADER_SIZE = 18;
private static final transient Logger LOG = LoggerFactory.getLogger(TreePage.class);
static enum Flavour {
LESS, MORE
}
private TreeIndex tree;
private int maximumEntries;
private long id;
private long parentId = TreeEntry.NOT_SET;
private boolean leaf = true;
private List<TreeEntry> treeEntries;
/*
* for persistence only
*/
private long nextFreePageId = TreeEntry.NOT_SET;
private boolean active = true;
/**
* Constructor
*
* @param tree
* @param id
* @param parentId
* @param maximumEntries
*/
TreePage(TreeIndex tree, long id, long parentId, int maximumEntries) {
this(maximumEntries);
this.tree = tree;
this.id = id;
this.parentId = parentId;
}
/**
* Constructor
*
* @param maximumEntries
*/
public TreePage(int maximumEntries) {
this.maximumEntries = maximumEntries;
this.treeEntries = new ArrayList<TreeEntry>(maximumEntries);
}
public String toString() {
return "TreePage[" + getId() + "]parent=" + getParentId();
}
public boolean equals(Object o) {
boolean result = false;
if (o instanceof TreePage) {
TreePage other = (TreePage)o;
result = other.id == id;
}
return result;
}
public int hashCode() {
return (int)id;
}
boolean isActive() {
return this.active;
}
void setActive(boolean active) {
this.active = active;
}
long getNextFreePageId() {
return this.nextFreePageId;
}
void setNextFreePageId(long nextPageId) {
this.nextFreePageId = nextPageId;
}
long getId() {
return id;
}
void setId(long id) {
this.id = id;
}
void write(Marshaller keyMarshaller, DataOutput dataOut) throws IOException {
writeHeader(dataOut);
dataOut.writeInt(treeEntries.size());
for (TreeEntry entry : treeEntries) {
entry.write(keyMarshaller, dataOut);
}
}
void read(Marshaller keyMarshaller, DataInput dataIn) throws IOException {
readHeader(dataIn);
int size = dataIn.readInt();
treeEntries.clear();
for (int i = 0; i < size; i++) {
TreeEntry entry = new TreeEntry();
entry.read(keyMarshaller, dataIn);
treeEntries.add(entry);
}
}
void readHeader(DataInput dataIn) throws IOException {
active = dataIn.readBoolean();
leaf = dataIn.readBoolean();
setParentId(dataIn.readLong());
nextFreePageId = dataIn.readLong();
}
void writeHeader(DataOutput dataOut) throws IOException {
dataOut.writeBoolean(isActive());
dataOut.writeBoolean(isLeaf());
dataOut.writeLong(getParentId());
dataOut.writeLong(nextFreePageId);
}
boolean isEmpty() {
return treeEntries.isEmpty();
}
boolean isFull() {
return treeEntries.size() >= maximumEntries;
}
boolean isRoot() {
return getParentId() < 0;
}
boolean isLeaf() {
if (treeEntries.isEmpty()) {
leaf = true;
}
return leaf;
}
boolean isUnderflowed() {
return treeEntries.size() < (maximumEntries / 2);
}
boolean isOverflowed() {
return treeEntries.size() > maximumEntries;
}
void setLeaf(boolean newValue) {
this.leaf = newValue;
}
TreePage getParent() throws IOException {
return tree.lookupPage(parentId);
}
long getParentId() {
return parentId;
}
void setParentId(long newId) throws IOException {
if (newId == this.id) {
throw new IllegalStateException("Cannot set page as a child of itself " + this
+ " trying to set parentId = " + newId);
}
this.parentId = newId;
tree.writePage(this);
}
List<TreeEntry> getEntries() {
return treeEntries;
}
void setEntries(List<TreeEntry> newEntries) {
this.treeEntries = newEntries;
}
int getMaximumEntries() {
return this.maximumEntries;
}
void setMaximumEntries(int maximumEntries) {
this.maximumEntries = maximumEntries;
}
int size() {
return treeEntries.size();
}
TreeIndex getTree() {
return this.tree;
}
void setTree(TreeIndex tree) {
this.tree = tree;
}
void reset() throws IOException {
treeEntries.clear();
setParentId(TreeEntry.NOT_SET);
setNextFreePageId(TreeEntry.NOT_SET);
setLeaf(true);
}
public TreeEntry find(TreeEntry key) throws IOException {
int low = 0;
int high = size() - 1;
long pageId = -1;
while (low <= high) {
int mid = (low + high) >> 1;
TreeEntry te = getTreeEntry(mid);
int cmp = te.compareTo(key);
if (cmp == 0) {
return te;
} else if (cmp < 0) {
low = mid + 1;
pageId = te.getNextPageId();
} else {
high = mid - 1;
pageId = te.getPrevPageId();
}
}
TreePage page = tree.lookupPage(pageId);
if (page != null) {
return page.find(key);
}
return null;
}
TreeEntry put(TreeEntry newEntry) throws IOException {
TreeEntry result = null;
if (isRoot()) {
if (isEmpty()) {
insertTreeEntry(0, newEntry);
} else {
result = doInsert(null, newEntry);
}
} else {
throw new IllegalStateException("insert() should not be called on non root page - " + this);
}
return result;
}
TreeEntry remove(TreeEntry entry) throws IOException {
TreeEntry result = null;
if (isRoot()) {
if (!isEmpty()) {
result = doRemove(entry);
}
} else {
throw new IllegalStateException("remove() should not be called on non root page");
}
return result;
}
/**
 * Recursive insertion worker. Walks down from this page to the page where
 * {@code newEntry} belongs, then either replaces an equal entry's value,
 * inserts in place, or overflows into the parent chain when the page is full.
 *
 * @param flavour which side of the parent this subtree hangs off (null at root)
 * @param newEntry the entry being inserted
 * @return the previous entry when an equal key existed (carrying the old
 *         index offset), otherwise null
 * @throws IOException on page read/write failure
 */
private TreeEntry doInsert(Flavour flavour, TreeEntry newEntry) throws IOException {
    TreeEntry result = null;
    TreePageEntry closest = findClosestEntry(newEntry);
    if (closest != null) {
        TreeEntry closestEntry = closest.getTreeEntry();
        TreePage closestPage = closest.getTreePage();
        int cmp = closestEntry.compareTo(newEntry);
        if (cmp == 0) {
            // key already present: swap index offsets so the caller gets the
            // old value back in newEntry - we actually just need to pass back
            // the value
            long oldValue = closestEntry.getIndexOffset();
            closestEntry.setIndexOffset(newEntry.getIndexOffset());
            newEntry.setIndexOffset(oldValue);
            result = newEntry;
            save();
        } else if (closestPage != null) {
            // descend into the child page on the matching side
            result = closestPage.doInsert(closest.getFlavour(), newEntry);
        } else {
            if (!isFull()) {
                insertTreeEntry(closest.getIndex(), newEntry);
                save();
            } else {
                doOverflow(flavour, newEntry);
            }
        }
    } else {
        if (!isFull()) {
            doInsertEntry(newEntry);
            save();
        } else {
            // need to insert the new entry and propagate the highest
            // value up the tree
            doOverflow(flavour, newEntry);
        }
    }
    return result;
}
/**
 * Handle insertion into a full page. Non-root pages push an edge entry up to
 * the parent to keep distribution; the root splits into two children under a
 * freshly created root.
 *
 * @param flavour which side of the parent this page hangs off (null at root)
 * @param newEntry the entry that triggered the overflow
 * @return the page on which {@code newEntry} finally landed
 * @throws IOException on page read/write failure
 */
private TreePage doOverflow(Flavour flavour, TreeEntry newEntry) throws IOException {
    TreePage result = this;
    TreeEntry theEntry = newEntry;
    if (!isFull()) {
        doInsertEntry(newEntry);
        save();
    } else {
        if (!isRoot() && flavour != null) {
            // we aren't the root, but to ensure the correct distribution we
            // need to insert the new entry and take a node off the end of
            // the page and pass that up the tree to find a home
            doInsertEntry(newEntry);
            if (flavour == Flavour.LESS) {
                theEntry = removeTreeEntry(0);
                theEntry.reset();
                theEntry.setNextPageId(getId());
            } else {
                theEntry = removeTreeEntry(size() - 1);
                theEntry.reset();
                theEntry.setPrevPageId(getId());
            }
            save();
            result = getParent().doOverflow(flavour, theEntry);
            if (!theEntry.equals(newEntry)) {
                // the newEntry stayed here
                result = this;
            }
        } else {
            // so we are the root and need to split
            doInsertEntry(newEntry);
            int midIndex = size() / 2;
            TreeEntry midEntry = removeTreeEntry(midIndex);
            List<TreeEntry> subList = getSubList(midIndex, size());
            removeAllTreeEntries(subList);
            TreePage newRoot = tree.createRoot();
            newRoot.setLeaf(false);
            this.setParentId(newRoot.getId());
            save(); // we are no longer root - need to save - we maybe
            // looked up v. soon!
            TreePage rightPage = tree.createPage(newRoot.getId());
            rightPage.setEntries(subList);
            rightPage.checkLeaf();
            resetParentId(rightPage.getId(), rightPage.getEntries());
            // the median entry becomes the new root's single separator,
            // pointing left at this page and right at the new sibling
            midEntry.setNextPageId(rightPage.getId());
            midEntry.setPrevPageId(this.getId());
            newRoot.insertTreeEntry(0, midEntry);
            resetParentId(newRoot.getId(), newRoot.getEntries());
            save();
            rightPage.save();
            newRoot.save();
        }
    }
    return result;
}
/**
 * Recursive removal worker. Finds the page holding an entry equal to
 * {@code entry}, removes it there, and rebalances via doUnderflow so child
 * pages are not orphaned.
 *
 * @param entry the entry to remove
 * @return the removed entry from this page, or null when the match was
 *         found (and removed) deeper in the tree or not at all
 * @throws IOException on page read/write failure
 */
private TreeEntry doRemove(TreeEntry entry) throws IOException {
    TreeEntry result = null;
    TreePageEntry closest = findClosestEntry(entry);
    if (closest != null) {
        TreeEntry closestEntry = closest.getTreeEntry();
        if (closestEntry != null) {
            TreePage closestPage = closest.getTreePage();
            int cmp = closestEntry.compareTo(entry);
            if (cmp == 0) {
                result = closest.getTreeEntry();
                int index = closest.getIndex();
                removeTreeEntry(index);
                save();
                // ensure we don't lose children
                doUnderflow(result, index);
            } else if (closestPage != null) {
                // not on this page: descend into the child on the match side
                closestPage.doRemove(entry);
            }
        }
    }
    return result;
}
/**
 * Rebalance this page after a removal left it under-filled. Repeatedly pulls
 * entries up from child pages while this is an interior page, then - if it
 * ends up an under-filled leaf - pushes its remaining entries into the parent
 * and releases itself.
 *
 * @return true if the page is removed
 * @throws IOException on page read/write failure
 */
private boolean doUnderflow() throws IOException {
    boolean result = false;
    boolean working = true;
    // keep borrowing from children until balanced, empty, or we become a leaf
    while (working && isUnderflowed() && !isEmpty() && !isLeaf()) {
        int lastIndex = size() - 1;
        TreeEntry entry = getTreeEntry(lastIndex);
        working = doUnderflow(entry, lastIndex);
    }
    if (isUnderflowed() && isLeaf()) {
        result = doUnderflowLeaf();
    }
    return result;
}
/**
 * Rebalance around a single entry slot: borrow a replacement entry from the
 * child page on the entry's next side (and, if needed, the prev side),
 * re-parenting any pages the borrowed entry referenced and collapsing child
 * pages that become empty.
 * <p>
 * NOTE(review): heavily order-dependent - each save() persists intermediate
 * state that later lookups rely on; do not reorder.
 *
 * @param entry the entry whose child pointers are being rebalanced
 * @param index position of {@code entry} on this page
 * @return true if any entry was pulled up from a child
 * @throws IOException on page read/write failure
 */
private boolean doUnderflow(TreeEntry entry, int index) throws IOException {
    boolean result = false;
    // pull an entry up from a leaf to fill the empty space
    if (entry.getNextPageId() != TreeEntry.NOT_SET) {
        TreePage page = tree.lookupPage(entry.getNextPageId());
        if (page != null && !page.isEmpty()) {
            TreeEntry replacement = page.removeTreeEntry(0);
            TreeEntry copy = replacement.copy();
            checkParentIdForRemovedPageEntry(copy, page.getId(), getId());
            if (!page.isEmpty()) {
                copy.setNextPageId(page.getId());
                page.setParentId(this.id);
            } else {
                page.setLeaf(true);
            }
            int replacementIndex = doInsertEntry(copy);
            if (page.doUnderflow()) {
                // page removed so update our replacement
                resetPageReference(replacementIndex, copy.getNextPageId());
                copy.setNextPageId(TreeEntry.NOT_SET);
            } else {
                page.save();
            }
            save();
            result = true;
        }
    }
    // ensure we don't lose the previous bit of the tree
    if (entry.getPrevPageId() != TreeEntry.NOT_SET) {
        TreeEntry prevEntry = (index > 0) ? getTreeEntry(index - 1) : null;
        // only borrow from the prev-side child when no neighbouring entry
        // already links to that page
        if (prevEntry == null || prevEntry.getNextPageId() != entry.getPrevPageId()) {
            TreePage page = tree.lookupPage(entry.getPrevPageId());
            if (page != null && !page.isEmpty()) {
                TreeEntry replacement = page.removeTreeEntry(page.getEntries().size() - 1);
                TreeEntry copy = replacement.copy();
                // check children pages of the replacement point to the
                // correct place
                checkParentIdForRemovedPageEntry(copy, page.getId(), getId());
                if (!page.isEmpty()) {
                    copy.setPrevPageId(page.getId());
                } else {
                    page.setLeaf(true);
                }
                insertTreeEntry(index, copy);
                // if we overflow - the page the replacement ends up on
                TreePage landed = null;
                TreeEntry removed = null;
                if (isOverflowed()) {
                    TreePage parent = getParent();
                    if (parent != null) {
                        removed = getTreeEntry(0);
                        Flavour flavour = getFlavour(parent, removed);
                        if (flavour == Flavour.LESS) {
                            removed = removeTreeEntry(0);
                            landed = parent.doOverflow(flavour, removed);
                        } else {
                            removed = removeTreeEntry(size() - 1);
                            landed = parent.doOverflow(Flavour.MORE, removed);
                        }
                    }
                }
                if (page.doUnderflow()) {
                    if (landed == null || landed.equals(this)) {
                        landed = this;
                    }
                    resetPageReference(copy.getNextPageId());
                    landed.resetPageReference(copy.getNextPageId());
                    copy.setPrevPageId(TreeEntry.NOT_SET);
                    landed.save();
                } else {
                    page.save();
                }
                save();
                result = true;
            }
            // now we need to check we haven't overflowed this page
        }
    }
    if (!result) {
        save();
    }
    // now see if we need to save this page
    result |= doUnderflowLeaf();
    save();
    return result;
}
/**
 * Collapse an under-filled leaf: push all of its entries up into the parent
 * (via doOverflow), detach the leaf from the parent's entry pointers, and
 * release the page back to the index.
 *
 * @return true if this leaf page was removed from the tree
 * @throws IOException on page read/write failure
 */
private boolean doUnderflowLeaf() throws IOException {
    boolean result = false;
    // if we have underflowed - and we are a leaf - push entries further up
    // the tree and delete ourselves
    if (isUnderflowed() && isLeaf()) {
        List<TreeEntry> list = new ArrayList<TreeEntry>(treeEntries);
        treeEntries.clear();
        for (TreeEntry entry : list) {
            // need to check for each iteration - we might get promoted to
            // root
            TreePage parent = getParent();
            if (parent != null) {
                Flavour flavour = getFlavour(parent, entry);
                TreePage landedOn = parent.doOverflow(flavour, entry);
                checkParentIdForRemovedPageEntry(entry, getId(), landedOn.getId());
            }
        }
        TreePage parent = getParent();
        if (parent != null) {
            parent.checkLeaf();
            parent.removePageId(getId());
            parent.doUnderflow();
            parent.save();
            tree.releasePage(this);
            result = true;
        }
    }
    return result;
}
/**
 * Decide which side of {@code page} the entry sorts onto, judged against the
 * page's last (highest) entry.
 *
 * @return Flavour.MORE when the page's last entry is greater than
 *         {@code entry}, Flavour.LESS otherwise, or null for a null/empty page
 */
private Flavour getFlavour(TreePage page, TreeEntry entry) {
    if (page == null || page.getEntries().isEmpty()) {
        return null;
    }
    List<TreeEntry> pageEntries = page.getEntries();
    TreeEntry last = pageEntries.get(pageEntries.size() - 1);
    return last.compareTo(entry) > 0 ? Flavour.MORE : Flavour.LESS;
}
/**
 * Recompute the leaf flag: a page is a leaf exactly when none of its entries
 * reference child pages.
 */
private void checkLeaf() {
    boolean hasChildren = false;
    for (TreeEntry entry : treeEntries) {
        if (entry.hasChildPagesReferences()) {
            hasChildren = true;
            break;
        }
    }
    setLeaf(!hasChildren);
}
/**
 * After an entry moves between pages, re-parent any child pages it points at
 * (prev and next) that still record the old page as their parent.
 *
 * @param entry the relocated entry
 * @param oldPageId the page the entry used to live on
 * @param newPageId the page the entry now lives on
 * @throws IOException on page read/write failure
 */
private void checkParentIdForRemovedPageEntry(TreeEntry entry, long oldPageId, long newPageId)
    throws IOException {
    reparentChild(entry.getPrevPageId(), oldPageId, newPageId);
    reparentChild(entry.getNextPageId(), oldPageId, newPageId);
}

/** Re-parent a single child page if it still points at {@code oldPageId}. */
private void reparentChild(long childPageId, long oldPageId, long newPageId) throws IOException {
    TreePage page = tree.lookupPage(childPageId);
    if (page != null && page.getParentId() == oldPageId) {
        page.setParentId(newPageId);
        page.save();
    }
}
/**
 * Clear every prev/next reference on this page that points at the page being
 * removed from the tree.
 *
 * @param pageId id of the page that is going away
 */
private void removePageId(long pageId) {
    for (TreeEntry entry : treeEntries) {
        if (pageId == entry.getNextPageId()) {
            entry.setNextPageId(TreeEntry.NOT_SET);
        }
        if (pageId == entry.getPrevPageId()) {
            entry.setPrevPageId(TreeEntry.NOT_SET);
        }
    }
}
/**
 * Binary-search this page for {@code key}, returning the nearest entry
 * together with the child page on the matching side, the search direction
 * (flavour), and the insertion index.
 *
 * @param key the entry being located
 * @return a TreePageEntry describing the closest match, or null when the
 *         page is empty
 * @throws IOException if the candidate child page cannot be read
 */
private TreePageEntry findClosestEntry(TreeEntry key) throws IOException {
    TreePageEntry result = null;
    TreeEntry treeEntry = null;
    Flavour flavour = null;
    long pageId = -1;
    int low = 0;
    int high = size() - 1;
    int mid = low;
    while (low <= high) {
        // unsigned shift: overflow-safe midpoint (fix for (low + high) >> 1)
        mid = (low + high) >>> 1;
        treeEntry = getTreeEntry(mid);
        int cmp = treeEntry.compareTo(key);
        if (cmp < 0) {
            low = mid + 1;
            pageId = treeEntry.getNextPageId();
            flavour = Flavour.LESS;
        } else if (cmp > 0) {
            high = mid - 1;
            pageId = treeEntry.getPrevPageId();
            flavour = Flavour.MORE;
        } else {
            // got exact match; low is the entry's index
            low = mid;
            break;
        }
    }
    if (treeEntry != null) {
        // pageId stays -1 on an immediate exact match, so lookupPage
        // returns null and the caller works with this page directly
        TreePage treePage = tree.lookupPage(pageId);
        result = new TreePageEntry(treeEntry, treePage, flavour, low);
    }
    return result;
}
/**
 * Insert {@code newEntry} at its sorted position on this page and return the
 * index it was placed at.
 * <p>
 * Fixes: the original loop had no branch for {@code compareTo == 0}, so an
 * equal key left both bounds unchanged and spun forever; an equal key now
 * terminates the search and inserts at the matching index. The midpoint also
 * uses an unsigned shift to avoid int overflow.
 *
 * @param newEntry the entry to place
 * @return the index the entry was inserted at
 * @throws IOException propagated from insertTreeEntry
 */
private int doInsertEntry(TreeEntry newEntry) throws IOException {
    int low = 0;
    int high = size() - 1;
    while (low <= high) {
        int mid = (low + high) >>> 1;
        TreeEntry midVal = getTreeEntry(mid);
        int cmp = midVal.compareTo(newEntry);
        if (cmp < 0) {
            low = mid + 1;
        } else if (cmp > 0) {
            high = mid - 1;
        } else {
            // equal key: insert here instead of looping forever
            low = mid;
            break;
        }
    }
    insertTreeEntry(low, newEntry);
    return low;
}
/**
 * Insert {@code entry} at {@code index}, fixing up the overlapping child-page
 * pointers of the neighbouring entries so no two adjacent entries claim the
 * same child page, and inheriting a neighbour's pointer when the new entry
 * has none of its own.
 *
 * @param index position to insert at
 * @param entry the entry to insert
 * @throws IOException propagated from addTreeEntry
 */
private void insertTreeEntry(int index, TreeEntry entry) throws IOException {
    int p = index - 1;
    int n = index;
    TreeEntry prevEntry = (p >= 0 && p < treeEntries.size()) ? treeEntries.get(p) : null;
    TreeEntry nextEntry = (n >= 0 && n < treeEntries.size()) ? treeEntries.get(n) : null;
    if (prevEntry != null) {
        // neighbour on the left loses its next pointer if the new entry
        // duplicates it; the new entry adopts it as prev when unset
        if (prevEntry.getNextPageId() == entry.getNextPageId()) {
            prevEntry.setNextPageId(TreeEntry.NOT_SET);
        }
        if (entry.getPrevPageId() == TreeEntry.NOT_SET) {
            entry.setPrevPageId(prevEntry.getNextPageId());
        }
    }
    if (nextEntry != null) {
        // mirror-image fix-up for the neighbour on the right
        if (nextEntry.getPrevPageId() == entry.getPrevPageId()) {
            nextEntry.setPrevPageId(TreeEntry.NOT_SET);
        }
        if (entry.getNextPageId() == TreeEntry.NOT_SET) {
            entry.setNextPageId(nextEntry.getPrevPageId());
        }
    }
    addTreeEntry(index, entry);
}
/**
 * Clear stale references to {@code pageId} held by the entries immediately
 * around position {@code index} (left neighbour's next, right neighbour's
 * prev).
 *
 * @param index the slot whose neighbours are checked
 * @param pageId the page id to scrub
 */
private void resetPageReference(int index, long pageId) {
    int left = index - 1;
    if (left >= 0 && left < treeEntries.size()) {
        TreeEntry leftEntry = treeEntries.get(left);
        if (leftEntry.getNextPageId() == pageId) {
            leftEntry.setNextPageId(TreeEntry.NOT_SET);
        }
    }
    if (index >= 0 && index < treeEntries.size()) {
        TreeEntry rightEntry = treeEntries.get(index);
        if (rightEntry.getPrevPageId() == pageId) {
            rightEntry.setPrevPageId(TreeEntry.NOT_SET);
        }
    }
}
/**
 * Scrub every entry on this page of references to {@code pageId}.
 *
 * @param pageId the page id to clear
 * @return true if at least one reference was cleared
 */
private boolean resetPageReference(long pageId) {
    boolean updated = false;
    for (TreeEntry entry : treeEntries) {
        if (pageId == entry.getPrevPageId()) {
            entry.setPrevPageId(TreeEntry.NOT_SET);
            updated = true;
        }
        if (pageId == entry.getNextPageId()) {
            entry.setNextPageId(TreeEntry.NOT_SET);
            updated = true;
        }
    }
    return updated;
}
/**
 * Point every child page referenced by {@code entries} at a new parent.
 *
 * @param newParentId id of the page that now owns these children
 * @param entries entries whose prev/next child pages are re-parented
 * @throws IOException on page read/write failure
 */
private void resetParentId(long newParentId, List<TreeEntry> entries) throws IOException {
    // collect distinct child ids first so each page is touched once
    Set<Long> childIds = new HashSet<Long>();
    for (TreeEntry entry : entries) {
        if (entry != null) {
            childIds.add(entry.getPrevPageId());
            childIds.add(entry.getNextPageId());
        }
    }
    for (Long childId : childIds) {
        TreePage child = tree.lookupPage(childId);
        if (child != null) {
            child.setParentId(newParentId);
        }
    }
}
/** Place {@code entry} at {@code index} in the backing list. */
private void addTreeEntry(int index, TreeEntry entry) throws IOException {
    treeEntries.add(index, entry);
}

/** Remove and return the entry at {@code index}. */
private TreeEntry removeTreeEntry(int index) throws IOException {
    return treeEntries.remove(index);
}

/** Drop every entry contained in {@code c} from this page. */
private void removeAllTreeEntries(List<TreeEntry> c) {
    treeEntries.removeAll(c);
}

/** @return an independent copy of the entries in [from, to) */
private List<TreeEntry> getSubList(int from, int to) {
    return new ArrayList<TreeEntry>(treeEntries.subList(from, to));
}

/** @return the entry at {@code index} */
private TreeEntry getTreeEntry(int index) {
    return treeEntries.get(index);
}
/**
 * Persist only this page's header via the owning index.
 *
 * @throws IOException on write failure
 */
void saveHeader() throws IOException {
    tree.writePage(this);
}

/**
 * Persist this page's header and all of its entries via the owning index.
 *
 * @throws IOException on write failure
 */
void save() throws IOException {
    tree.writeFullPage(this);
}
/**
 * Log this page and all of its entries, then recursively dump every distinct
 * child page. Debugging aid only.
 *
 * @throws IOException if a child page cannot be read
 */
protected void dump() throws IOException {
    LOG.info(this.toString());
    Set<Long> childPageIds = new HashSet<Long>();
    for (TreeEntry entry : treeEntries) {
        if (entry != null) {
            LOG.info(entry.toString());
            childPageIds.add(entry.getPrevPageId());
            childPageIds.add(entry.getNextPageId());
        }
    }
    for (Long childPageId : childPageIds) {
        TreePage child = tree.lookupPage(childPageId);
        if (child != null) {
            child.dump();
        }
    }
}
}

View File

@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.kaha.impl.index.tree;
/**
 * A conglomerate used to return results from a tree lookup: the matched
 * entry, the child page on the search side, the search direction (flavour),
 * and the entry's index on its page.
 */
class TreePageEntry {

    private TreeEntry treeEntry;
    private TreePage treePage;
    private TreePage.Flavour flavour;
    private int index = -1;

    TreePageEntry(TreeEntry treeEntry, TreePage treePage, TreePage.Flavour flavour, int index) {
        this.treeEntry = treeEntry;
        this.treePage = treePage;
        this.flavour = flavour;
        this.index = index;
    }

    /** @return which side of the page the search descended (LESS/MORE) */
    TreePage.Flavour getFlavour() {
        return flavour;
    }

    /** @param flavour the search direction to record */
    void setFlavour(TreePage.Flavour flavour) {
        this.flavour = flavour;
    }

    /** @return the child page on the search side, possibly null */
    TreePage getTreePage() {
        return treePage;
    }

    /** @param treePage the child page to record */
    void setTreePage(TreePage treePage) {
        this.treePage = treePage;
    }

    /** @return the entry's index on its page */
    public int getIndex() {
        return index;
    }

    /** @param index the entry's index on its page */
    public void setIndex(int index) {
        this.index = index;
    }

    /** @return the closest matching entry */
    public TreeEntry getTreeEntry() {
        return treeEntry;
    }

    /** @param treeEntry the closest matching entry */
    public void setTreeEntry(TreeEntry treeEntry) {
        this.treeEntry = treeEntry;
    }
}

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
BTree implementation of an index for a Map
</body>
</html>

View File

@ -1,25 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
Kaha implementation classes
</body>
</html>

View File

@ -1,27 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
</head>
<body>
<p>
fast message persistence implementation
</p>
</body>
</html>

View File

@ -1,114 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import org.apache.activemq.command.ActiveMQQueue;
import org.apache.activemq.command.ActiveMQTopic;
import org.apache.activemq.command.SubscriptionInfo;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.store.amq.AMQTx;
/**
 * Adapter to the actual persistence mechanism used with ActiveMQ; extends the
 * base PersistenceAdapter with reference-store (message location) management,
 * recovery hooks, and prepared-transaction persistence.
 */
public interface ReferenceStoreAdapter extends PersistenceAdapter {
    /**
     * Factory method to create a new queue message store with the given
     * destination name
     *
     * @param destination
     * @return the QueueReferenceStore
     * @throws IOException
     */
    ReferenceStore createQueueReferenceStore(ActiveMQQueue destination) throws IOException;
    /**
     * Factory method to create a new topic message store with the given
     * destination name
     *
     * @param destination
     * @return the TopicReferenceStore
     * @throws IOException
     */
    TopicReferenceStore createTopicReferenceStore(ActiveMQTopic destination) throws IOException;
    /**
     * @return Set of data-file ids currently referenced by live messages
     * @throws IOException
     */
    Set<Integer> getReferenceFileIdsInUse() throws IOException;
    /**
     * If the store isn't valid, it can be recovered at start-up
     *
     * @return true if the reference store is in a consistent state
     */
    boolean isStoreValid();
    /**
     * called by recover to clear out message references
     *
     * @throws IOException
     */
    void clearMessages() throws IOException;
    /**
     * recover any state
     *
     * @throws IOException
     */
    void recoverState() throws IOException;
    /**
     * Save prepared transactions
     *
     * @param map prepared transactions keyed by transaction id
     * @throws IOException
     */
    void savePreparedState(Map<TransactionId, AMQTx> map) throws IOException;
    /**
     * @return saved prepared transactions
     * @throws IOException
     */
    Map<TransactionId, AMQTx> retrievePreparedState() throws IOException;
    /**
     * @return the maxDataFileLength
     */
    long getMaxDataFileLength();
    /**
     * set the max data length of a reference data log - if used
     * @param maxDataFileLength
     */
    void setMaxDataFileLength(long maxDataFileLength);
    /**
     * Recover particular subscription. Used for recovery of durable consumers
     * @param info
     * @throws IOException
     */
    void recoverSubscription(SubscriptionInfo info) throws IOException;
}

View File

@ -1,351 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.File;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import org.apache.activemq.command.ActiveMQBlobMessage;
import org.apache.activemq.command.ActiveMQBytesMessage;
import org.apache.activemq.command.ActiveMQMapMessage;
import org.apache.activemq.command.ActiveMQMessage;
import org.apache.activemq.command.ActiveMQObjectMessage;
import org.apache.activemq.command.ActiveMQStreamMessage;
import org.apache.activemq.command.ActiveMQTextMessage;
import org.apache.activemq.command.DataStructure;
import org.apache.activemq.command.JournalQueueAck;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.JournalTrace;
import org.apache.activemq.command.JournalTransaction;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.kaha.impl.async.ReadOnlyAsyncDataManager;
import org.apache.activemq.openwire.OpenWireFormat;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
import org.apache.velocity.Template;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.Velocity;
import org.apache.velocity.app.VelocityEngine;
import org.josql.Query;
/**
 * Allows you to view the contents of a Journal. Command-line tool: scans the
 * journal data files in the given directories, formats each record through a
 * Velocity template, and optionally filters records with a JoSQL WHERE
 * expression.
 */
public class AMQJournalTool {
    private final ArrayList<File> dirs = new ArrayList<File>();
    private final WireFormat wireFormat = new OpenWireFormat();
    // formatter name -> Velocity template source, served via CustomResourceLoader
    private final HashMap<String, String> resources = new HashMap<String, String>();
    private String messageFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.messageId}|${record.properties}|${body}";
    private String topicAckFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.clientId}|${record.subscritionName}|${record.messageId}";
    private String queueAckFormat = "${location.dataFileId},${location.offset}|${type}|${record.destination}|${record.messageAck.lastMessageId}";
    private String transactionFormat = "${location.dataFileId},${location.offset}|${type}|${record.transactionId}";
    private String traceFormat = "${location.dataFileId},${location.offset}|${type}|${record.message}";
    private String unknownFormat = "${location.dataFileId},${location.offset}|${type}|${record.class.name}";
    // optional JoSQL WHERE clause used to filter records
    private String where;
    private VelocityContext context;
    private VelocityEngine velocity;
    private boolean help;

    /** CLI entry point: parses options, treats remaining args as directories. */
    public static void main(String[] args) throws Exception {
        AMQJournalTool consumerTool = new AMQJournalTool();
        String[] directories = CommandLineSupport
            .setOptions(consumerTool, args);
        if (directories.length < 1) {
            System.out
                .println("Please specify the directories with journal data to scan");
            return;
        }
        for (int i = 0; i < directories.length; i++) {
            consumerTool.getDirs().add(new File(directories[i]));
        }
        consumerTool.execute();
    }

    /**
     * Validate the configured directories, set up Velocity and the optional
     * JoSQL query, then scan every journal record and print it.
     */
    public void execute() throws Exception {
        if( help ) {
            showHelp();
            return;
        }
        if (getDirs().size() < 1) {
            System.out.println("");
            System.out.println("Invalid Usage: Please specify the directories with journal data to scan");
            System.out.println("");
            showHelp();
            return;
        }
        for (File dir : getDirs()) {
            if( !dir.exists() ) {
                System.out.println("");
                System.out.println("Invalid Usage: the directory '"+dir.getPath()+"' does not exist");
                System.out.println("");
                showHelp();
                return;
            }
            if( !dir.isDirectory() ) {
                System.out.println("");
                System.out.println("Invalid Usage: the argument '"+dir.getPath()+"' is not a directory");
                System.out.println("");
                showHelp();
                return;
            }
        }
        // expose system properties to the templates without clobbering
        // names Velocity already defines
        context = new VelocityContext();
        List keys = Arrays.asList(context.getKeys());
        for (Iterator iterator = System.getProperties().entrySet()
            .iterator(); iterator.hasNext();) {
            Map.Entry kv = (Map.Entry) iterator.next();
            String name = (String) kv.getKey();
            String value = (String) kv.getValue();
            if (!keys.contains(name)) {
                context.put(name, value);
            }
        }
        velocity = new VelocityEngine();
        velocity.setProperty(Velocity.RESOURCE_LOADER, "all");
        velocity.setProperty("all.resource.loader.class", CustomResourceLoader.class.getName());
        velocity.init();
        resources.put("message", messageFormat);
        resources.put("topicAck", topicAckFormat);
        resources.put("queueAck", queueAckFormat);
        resources.put("transaction", transactionFormat);
        resources.put("trace", traceFormat);
        resources.put("unknown", unknownFormat);
        Query query = null;
        if (where != null) {
            query = new Query();
            query.parse("select * from "+Entry.class.getName()+" where "+where);
        }
        // walk every record location in order and process it
        ReadOnlyAsyncDataManager manager = new ReadOnlyAsyncDataManager(getDirs());
        manager.start();
        try {
            Location curr = manager.getFirstLocation();
            while (curr != null) {
                ByteSequence data = manager.read(curr);
                DataStructure c = (DataStructure) wireFormat.unmarshal(data);
                Entry entry = new Entry();
                entry.setLocation(curr);
                entry.setRecord(c);
                entry.setData(data);
                entry.setQuery(query);
                process(entry);
                curr = manager.getNextLocation(curr);
            }
        } finally {
            manager.close();
        }
    }

    /** Print the bundled help text (help.txt resource) to stdout. */
    private void showHelp() {
        InputStream is = AMQJournalTool.class.getResourceAsStream("help.txt");
        Scanner scanner = new Scanner(is);
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            System.out.println(line);
        }
        scanner.close();
    }

    /** Tag the entry with a display type and formatter, then display it. */
    private void process(Entry entry) throws Exception {
        Location location = entry.getLocation();
        DataStructure record = entry.getRecord();
        switch (record.getDataStructureType()) {
        case ActiveMQMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQBytesMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQBytesMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQBlobMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQBlobMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQMapMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQMapMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQObjectMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQObjectMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQStreamMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQStreamMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case ActiveMQTextMessage.DATA_STRUCTURE_TYPE:
            entry.setType("ActiveMQTextMessage");
            entry.setFormater("message");
            display(entry);
            break;
        case JournalQueueAck.DATA_STRUCTURE_TYPE:
            entry.setType("Queue Ack");
            entry.setFormater("queueAck");
            display(entry);
            break;
        case JournalTopicAck.DATA_STRUCTURE_TYPE:
            entry.setType("Topic Ack");
            entry.setFormater("topicAck");
            display(entry);
            break;
        case JournalTransaction.DATA_STRUCTURE_TYPE:
            entry.setType(getType((JournalTransaction) record));
            entry.setFormater("transaction");
            display(entry);
            break;
        case JournalTrace.DATA_STRUCTURE_TYPE:
            entry.setType("Trace");
            entry.setFormater("trace");
            display(entry);
            break;
        default:
            entry.setType("Unknown");
            entry.setFormater("unknown");
            display(entry);
            break;
        }
    }

    /** Human-readable label for a journal transaction record's sub-type. */
    private String getType(JournalTransaction record) {
        switch (record.getType()) {
        case JournalTransaction.XA_PREPARE:
            return "XA Prepare";
        case JournalTransaction.XA_COMMIT:
            return "XA Commit";
        case JournalTransaction.XA_ROLLBACK:
            return "XA Rollback";
        case JournalTransaction.LOCAL_COMMIT:
            return "Commit";
        case JournalTransaction.LOCAL_ROLLBACK:
            return "Rollback";
        }
        return "Unknown Transaction";
    }

    /**
     * Apply the optional JoSQL filter, then render the entry's template to
     * stdout with the entry's fields bound into the Velocity context.
     */
    private void display(Entry entry) throws Exception {
        if (entry.getQuery() != null) {
            List list = Collections.singletonList(entry);
            List results = entry.getQuery().execute(list).getResults();
            if (results.isEmpty()) {
                // filtered out by the WHERE clause
                return;
            }
        }
        CustomResourceLoader.setResources(resources);
        try {
            context.put("location", entry.getLocation());
            context.put("record", entry.getRecord());
            context.put("type", entry.getType());
            if (entry.getRecord() instanceof ActiveMQMessage) {
                context.put("body", new MessageBodyFormatter(
                    (ActiveMQMessage) entry.getRecord()));
            }
            Template template = velocity.getTemplate(entry.getFormater());
            PrintWriter writer = new PrintWriter(System.out);
            template.merge(context, writer);
            writer.println();
            writer.flush();
        } finally {
            CustomResourceLoader.setResources(null);
        }
    }

    public void setMessageFormat(String messageFormat) {
        this.messageFormat = messageFormat;
    }

    public void setTopicAckFormat(String ackFormat) {
        this.topicAckFormat = ackFormat;
    }

    public void setTransactionFormat(String transactionFormat) {
        this.transactionFormat = transactionFormat;
    }

    public void setTraceFormat(String traceFormat) {
        this.traceFormat = traceFormat;
    }

    public void setUnknownFormat(String unknownFormat) {
        this.unknownFormat = unknownFormat;
    }

    public void setQueueAckFormat(String queueAckFormat) {
        this.queueAckFormat = queueAckFormat;
    }

    /** @return the configured WHERE clause, or null when unfiltered */
    public String getQuery() {
        return where;
    }

    /** @param query a JoSQL WHERE clause used to filter records */
    public void setWhere(String query) {
        this.where = query;
    }

    public boolean isHelp() {
        return help;
    }

    public void setHelp(boolean help) {
        this.help = help;
    }

    /**
     * @return the dirs
     */
    public ArrayList<File> getDirs() {
        return dirs;
    }
}

View File

@ -1,54 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.File;
import java.util.List;
import org.apache.activemq.console.CommandContext;
import org.apache.activemq.console.command.Command;
/**
 * Console command wrapper around {@link AMQJournalTool}: parses the command
 * tokens as tool options plus journal directories and runs the audit.
 */
public class AMQJournalToolCommand implements Command {

    private CommandContext context;

    @Override
    public String getName() {
        return "journal-audit";
    }

    @Override
    public String getOneLineDescription() {
        return "Allows you to view records stored in the persistent journal.";
    }

    /** Run the journal tool; leftover (non-option) tokens are directories. */
    public void execute(List<String> tokens) throws Exception {
        AMQJournalTool journalTool = new AMQJournalTool();
        String[] args = tokens.toArray(new String[tokens.size()]);
        for (String directory : CommandLineSupport.setOptions(journalTool, args)) {
            journalTool.getDirs().add(new File(directory));
        }
        journalTool.execute();
    }

    public void setCommandContext(CommandContext context) {
        this.context = context;
    }
}

View File

@ -1,584 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.DataStructure;
import org.apache.activemq.command.JournalQueueAck;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.MessageId;
import org.apache.activemq.filter.NonCachedMessageEvaluationContext;
import org.apache.activemq.kaha.MessageAckWithLocation;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.store.AbstractMessageStore;
import org.apache.activemq.store.MessageRecoveryListener;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.ReferenceStore;
import org.apache.activemq.store.ReferenceStore.ReferenceData;
import org.apache.activemq.thread.Task;
import org.apache.activemq.thread.TaskRunner;
import org.apache.activemq.transaction.Synchronization;
import org.apache.activemq.usage.MemoryUsage;
import org.apache.activemq.util.Callback;
import org.apache.activemq.util.TransactionTemplate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A MessageStore that uses a Journal to store it's messages.
*
*
*/
public class AMQMessageStore extends AbstractMessageStore {
private static final Logger LOG = LoggerFactory.getLogger(AMQMessageStore.class);
// NOTE(review): field name has a long-standing typo ("peristence"); it is protected and
// used by AMQTopicMessageStore, so renaming it would be an interface change.
protected final AMQPersistenceAdapter peristenceAdapter;
protected final AMQTransactionStore transactionStore;
// Long-term store that holds message *references* (file id + offset into the journal).
protected final ReferenceStore referenceStore;
protected final TransactionTemplate transactionTemplate;
// Most recent journal location handed to this store (guarded by 'lock').
protected Location lastLocation;
// Location up to which the reference store has been brought in sync (guarded by 'lock').
protected Location lastWrittenLocation;
// Journal locations of operations belonging to still-open transactions (guarded by 'lock').
protected Set<Location> inFlightTxLocations = new HashSet<Location>();
// Background task that flushes the pending add/ack batches into the reference store.
protected final TaskRunner asyncWriteTask;
// Latch that flush() waits on; created lazily, released by asyncWrite() (guarded by 'lock').
protected CountDownLatch flushLatch;
// Pending message adds not yet written to the reference store (guarded by 'lock').
private Map<MessageId, ReferenceData> messages = new LinkedHashMap<MessageId, ReferenceData>();
// Pending acks not yet written to the reference store (guarded by 'lock').
private List<MessageAckWithLocation> messageAcks = new ArrayList<MessageAckWithLocation>();
/** A MessageStore that we can use to retrieve messages quickly. */
// Snapshot of 'messages' taken by doAsyncWrite() so getLocation() can still resolve
// messages that are mid-checkpoint (guarded by 'lock').
private Map<MessageId, ReferenceData> cpAddedMessageIds;
private final boolean debug = LOG.isDebugEnabled();
// Recovery mark: oldest location that must be preserved in the journal.
private final AtomicReference<Location> mark = new AtomicReference<Location>();
// Store lock shared with the reference store; guards all mutable state above.
protected final Lock lock;
/**
 * @param adapter        owning persistence adapter (journal access)
 * @param referenceStore long-term reference store for this destination
 * @param destination    destination this store persists messages for
 */
public AMQMessageStore(AMQPersistenceAdapter adapter, ReferenceStore referenceStore, ActiveMQDestination destination) {
super(destination);
this.peristenceAdapter = adapter;
this.lock = referenceStore.getStoreLock();
this.transactionStore = adapter.getTransactionStore();
this.referenceStore = referenceStore;
this.transactionTemplate = new TransactionTemplate(adapter, new ConnectionContext(
new NonCachedMessageEvaluationContext()));
asyncWriteTask = adapter.getTaskRunnerFactory().createTaskRunner(new Task() {
public boolean iterate() {
asyncWrite();
return false;
}
}, "Checkpoint: " + destination);
}
public void setMemoryUsage(MemoryUsage memoryUsage) {
referenceStore.setMemoryUsage(memoryUsage);
}
/**
 * Not synchronize since the Journal has better throughput if you increase the number of concurrent writes that it
 * is doing.
 * Writes the message to the journal immediately; the reference-store update is recorded
 * right away for non-transacted adds, or deferred to commit via a Synchronization for
 * transacted adds.
 */
public final void addMessage(ConnectionContext context, final Message message) throws IOException {
final MessageId id = message.getMessageId();
final Location location = peristenceAdapter.writeCommand(message, message.isResponseRequired());
if (!context.isInTransaction()) {
if (debug) {
LOG.debug("Journalled message add for: " + id + ", at: " + location);
}
// Pin the journal data file until the reference store has the message.
this.peristenceAdapter.addInProgressDataFile(this, location.getDataFileId());
addMessage(message, location);
} else {
if (debug) {
LOG.debug("Journalled transacted message add for: " + id + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.add(location);
} finally {
lock.unlock();
}
transactionStore.addMessage(this, message, location);
context.getTransaction().addSynchronization(new Synchronization() {
public void afterCommit() throws Exception {
if (debug) {
LOG.debug("Transacted message add commit for: " + id + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.remove(location);
} finally {
lock.unlock();
}
addMessage(message, location);
}
public void afterRollback() throws Exception {
if (debug) {
LOG.debug("Transacted message add rollback for: " + id + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.remove(location);
} finally {
lock.unlock();
}
}
});
}
}
/**
 * Queues the journalled message for batched insertion into the reference store, then
 * either flushes synchronously (batch full) or wakes the async writer.
 */
final void addMessage(final Message message, final Location location) throws InterruptedIOException {
ReferenceData data = new ReferenceData();
data.setExpiration(message.getExpiration());
data.setFileId(location.getDataFileId());
data.setOffset(location.getOffset());
lock.lock();
try {
lastLocation = location;
ReferenceData prev = messages.put(message.getMessageId(), data);
if (prev != null) {
// Duplicate add: release the data file pinned by the earlier copy.
AMQMessageStore.this.peristenceAdapter.removeInProgressDataFile(AMQMessageStore.this, prev.getFileId());
}
} finally {
lock.unlock();
}
if (messages.size() > this.peristenceAdapter.getMaxCheckpointMessageAddSize()) {
flush();
} else {
try {
asyncWriteTask.wakeup();
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
}
}
/**
 * Re-applies a journalled add during recovery; a no-op if the reference store already
 * holds the message.
 *
 * @return true if the reference was (re)added, false if it already existed or the replay failed
 */
public boolean replayAddMessage(ConnectionContext context, Message message, Location location) {
MessageId id = message.getMessageId();
try {
// Only add the message if it has not already been added.
ReferenceData data = referenceStore.getMessageReference(id);
if (data == null) {
data = new ReferenceData();
data.setExpiration(message.getExpiration());
data.setFileId(location.getDataFileId());
data.setOffset(location.getOffset());
referenceStore.addMessageReference(context, id, data);
return true;
}
} catch (Throwable e) {
LOG.warn("Could not replay add for message '" + id + "'. Message may have already been added. reason: "
+ e, e);
}
return false;
}
/**
 * Journals the ack immediately; the reference-store removal is recorded right away for
 * non-transacted acks, or deferred to commit via a Synchronization for transacted acks.
 */
public void removeMessage(final ConnectionContext context, final MessageAck ack) throws IOException {
JournalQueueAck remove = new JournalQueueAck();
remove.setDestination(destination);
remove.setMessageAck(ack);
final Location location = peristenceAdapter.writeCommand(remove, ack.isResponseRequired());
if (!context.isInTransaction()) {
if (debug) {
LOG.debug("Journalled message remove for: " + ack.getLastMessageId() + ", at: " + location);
}
removeMessage(ack, location);
} else {
if (debug) {
LOG.debug("Journalled transacted message remove for: " + ack.getLastMessageId() + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.add(location);
} finally {
lock.unlock();
}
transactionStore.removeMessage(this, ack, location);
context.getTransaction().addSynchronization(new Synchronization() {
public void afterCommit() throws Exception {
if (debug) {
LOG.debug("Transacted message remove commit for: " + ack.getLastMessageId() + ", at: "
+ location);
}
lock.lock();
try {
inFlightTxLocations.remove(location);
} finally {
lock.unlock();
}
removeMessage(ack, location);
}
public void afterRollback() throws Exception {
if (debug) {
LOG.debug("Transacted message remove rollback for: " + ack.getLastMessageId() + ", at: "
+ location);
}
lock.lock();
try {
inFlightTxLocations.remove(location);
} finally {
lock.unlock();
}
}
});
}
}
/**
 * Records the ack for batched application to the reference store. If the matching add was
 * still pending, add and ack cancel out and nothing is queued.
 */
final void removeMessage(final MessageAck ack, final Location location) throws InterruptedIOException {
ReferenceData data;
lock.lock();
try {
lastLocation = location;
MessageId id = ack.getLastMessageId();
data = messages.remove(id);
if (data == null) {
messageAcks.add(new MessageAckWithLocation(ack, location));
} else {
// message never got written so datafileReference will still exist
AMQMessageStore.this.peristenceAdapter.removeInProgressDataFile(AMQMessageStore.this, data.getFileId());
}
} finally {
lock.unlock();
}
if (messageAcks.size() > this.peristenceAdapter.getMaxCheckpointMessageAddSize()) {
flush();
} else if (data == null) {
try {
asyncWriteTask.wakeup();
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
}
}
/**
 * Re-applies a journalled ack during recovery; a no-op if the reference store no longer
 * holds the message.
 *
 * @return true if the reference was removed, false if it was already gone or the replay failed
 */
public boolean replayRemoveMessage(ConnectionContext context, MessageAck messageAck) {
try {
// Only remove the message if it has not already been removed.
ReferenceData t = referenceStore.getMessageReference(messageAck.getLastMessageId());
if (t != null) {
referenceStore.removeMessage(context, messageAck);
return true;
}
} catch (Throwable e) {
LOG.warn("Could not replay acknowledge for message '" + messageAck.getLastMessageId()
+ "'. Message may have already been acknowledged. reason: " + e);
}
return false;
}
/**
 * Waits till the lastest data has landed on the referenceStore
 *
 * @throws InterruptedIOException
 */
public void flush() throws InterruptedIOException {
if (LOG.isDebugEnabled()) {
LOG.debug("flush starting ...");
}
CountDownLatch countDown;
lock.lock();
try {
// Already in sync: nothing pending, so return without waiting.
if (lastWrittenLocation == lastLocation) {
return;
}
if (flushLatch == null) {
flushLatch = new CountDownLatch(1);
}
countDown = flushLatch;
} finally {
lock.unlock();
}
try {
asyncWriteTask.wakeup();
countDown.await();
} catch (InterruptedException e) {
throw new InterruptedIOException();
}
if (LOG.isDebugEnabled()) {
LOG.debug("flush finished");
}
}
/**
 * Runs one checkpoint pass on the async writer thread: applies pending batches, updates
 * the recovery mark, and releases any waiting flush() callers.
 * @return
 * @throws IOException
 */
synchronized void asyncWrite() {
try {
CountDownLatch countDown;
lock.lock();
try {
countDown = flushLatch;
flushLatch = null;
} finally {
lock.unlock();
}
mark.set(doAsyncWrite());
if (countDown != null) {
countDown.countDown();
}
} catch (IOException e) {
LOG.error("Checkpoint failed: " + e, e);
}
}
/**
 * Applies the pending add/ack batches to the reference store inside a transaction.
 *
 * @return the new recovery mark: the oldest in-flight transaction location if any,
 *         otherwise the last location handed to this store
 * @throws IOException
 */
protected Location doAsyncWrite() throws IOException {
final List<MessageAckWithLocation> cpRemovedMessageLocations;
final List<Location> cpActiveJournalLocations;
final int maxCheckpointMessageAddSize = peristenceAdapter.getMaxCheckpointMessageAddSize();
final Location lastLocation;
// swap out the message hash maps..
lock.lock();
try {
cpAddedMessageIds = this.messages;
cpRemovedMessageLocations = this.messageAcks;
cpActiveJournalLocations = new ArrayList<Location>(inFlightTxLocations);
this.messages = new LinkedHashMap<MessageId, ReferenceData>();
this.messageAcks = new ArrayList<MessageAckWithLocation>();
lastLocation = this.lastLocation;
} finally {
lock.unlock();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Doing batch update... adding: " + cpAddedMessageIds.size() + " removing: "
+ cpRemovedMessageLocations.size() + " ");
}
transactionTemplate.run(new Callback() {
public void execute() throws Exception {
int size = 0;
PersistenceAdapter persitanceAdapter = transactionTemplate.getPersistenceAdapter();
ConnectionContext context = transactionTemplate.getContext();
// Checkpoint the added messages.
Iterator<Entry<MessageId, ReferenceData>> iterator = cpAddedMessageIds.entrySet().iterator();
while (iterator.hasNext()) {
Entry<MessageId, ReferenceData> entry = iterator.next();
try {
if (referenceStore.addMessageReference(context, entry.getKey(), entry.getValue())) {
if (LOG.isDebugEnabled()) {
LOG.debug("adding message ref:" + entry.getKey());
}
size++;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("not adding duplicate reference: " + entry.getKey() + ", " + entry.getValue());
}
}
// Reference is durable now; release the pinned journal data file.
AMQMessageStore.this.peristenceAdapter.removeInProgressDataFile(AMQMessageStore.this, entry
.getValue().getFileId());
} catch (Throwable e) {
LOG.warn("Message could not be added to long term store: " + e.getMessage(), e);
}
// Commit the batch if it's getting too big
if (size >= maxCheckpointMessageAddSize) {
persitanceAdapter.commitTransaction(context);
persitanceAdapter.beginTransaction(context);
size = 0;
}
}
persitanceAdapter.commitTransaction(context);
persitanceAdapter.beginTransaction(context);
// Checkpoint the removed messages.
for (MessageAckWithLocation ack : cpRemovedMessageLocations) {
try {
referenceStore.removeMessage(transactionTemplate.getContext(), ack);
} catch (Throwable e) {
LOG.warn("Message could not be removed from long term store: " + e.getMessage(), e);
}
}
}
});
LOG.debug("Batch update done. lastLocation:" + lastLocation);
lock.lock();
try {
cpAddedMessageIds = null;
lastWrittenLocation = lastLocation;
} finally {
lock.unlock();
}
if (cpActiveJournalLocations.size() > 0) {
// In-flight transactions must survive recovery: mark the oldest of them.
Collections.sort(cpActiveJournalLocations);
return cpActiveJournalLocations.get(0);
} else {
return lastLocation;
}
}
/**
 * Resolves the message id to a journal location and reads the message back from the
 * journal.
 *
 * @return the message, or null if no reference for the id exists
 */
public Message getMessage(MessageId identity) throws IOException {
Location location = getLocation(identity);
if (location != null) {
DataStructure rc = peristenceAdapter.readCommand(location);
try {
return (Message) rc;
} catch (ClassCastException e) {
throw new IOException("Could not read message " + identity + " at location " + location
+ ", expected a message, but got: " + rc);
}
}
return null;
}
/**
 * Looks up the journal location for a message id, checking the pending batch, the
 * in-checkpoint snapshot, and finally the reference store.
 */
protected Location getLocation(MessageId messageId) throws IOException {
ReferenceData data = null;
lock.lock();
try {
// Is it still in flight???
data = messages.get(messageId);
if (data == null && cpAddedMessageIds != null) {
data = cpAddedMessageIds.get(messageId);
}
} finally {
lock.unlock();
}
if (data == null) {
data = referenceStore.getMessageReference(messageId);
if (data == null) {
return null;
}
}
Location location = new Location();
location.setDataFileId(data.getFileId());
location.setOffset(data.getOffset());
return location;
}
/**
 * Replays the referenceStore first as those messages are the oldest ones, then messages are replayed from the
 * transaction log and then the cache is updated.
 *
 * @param listener
 * @throws Exception
 */
public void recover(final MessageRecoveryListener listener) throws Exception {
flush();
referenceStore.recover(new RecoveryListenerAdapter(this, listener));
}
public void start() throws Exception {
referenceStore.start();
}
public void stop() throws Exception {
flush();
asyncWriteTask.shutdown();
referenceStore.stop();
}
/**
 * @return Returns the longTermStore.
 */
public ReferenceStore getReferenceStore() {
return referenceStore;
}
/**
 * @see org.apache.activemq.store.MessageStore#removeAllMessages(ConnectionContext)
 */
public void removeAllMessages(ConnectionContext context) throws IOException {
flush();
referenceStore.removeAllMessages(context);
}
// Message-reference mode is not supported by the journalled store.
public void addMessageReference(ConnectionContext context, MessageId messageId, long expirationTime,
String messageRef) throws IOException {
throw new IOException("The journal does not support message references.");
}
public String getMessageReference(MessageId identity) throws IOException {
throw new IOException("The journal does not support message references.");
}
/**
 * Flushes pending batches first so the reference store count is accurate.
 * @return
 * @throws IOException
 * @see org.apache.activemq.store.MessageStore#getMessageCount()
 */
public int getMessageCount() throws IOException {
flush();
return referenceStore.getMessageCount();
}
/**
 * Recovers the next batch of messages; retries once after a flush in case pending
 * batches were hiding messages from the reference store.
 */
public void recoverNextMessages(int maxReturned, MessageRecoveryListener listener) throws Exception {
RecoveryListenerAdapter recoveryListener = new RecoveryListenerAdapter(this, listener);
referenceStore.recoverNextMessages(maxReturned, recoveryListener);
if (recoveryListener.size() == 0 && recoveryListener.hasSpace()) {
flush();
referenceStore.recoverNextMessages(maxReturned, recoveryListener);
}
}
/**
 * Reads a message straight from the journal using the given reference data.
 */
Message getMessage(ReferenceData data) throws IOException {
Location location = new Location();
location.setDataFileId(data.getFileId());
location.setOffset(data.getOffset());
DataStructure rc = peristenceAdapter.readCommand(location);
try {
return (Message) rc;
} catch (ClassCastException e) {
throw new IOException("Could not read message at location " + location + ", expected a message, but got: "
+ rc);
}
}
public void resetBatching() {
referenceStore.resetBatching();
}
/**
 * @return the recovery mark computed by the last checkpoint (may be null before the
 *         first checkpoint completes)
 */
public Location getMark() {
return mark.get();
}
public void dispose(ConnectionContext context) {
try {
flush();
} catch (InterruptedIOException e) {
Thread.currentThread().interrupt();
}
referenceStore.dispose(context);
super.dispose(context);
}
public void setBatch(MessageId messageId) {
try {
flush();
} catch (InterruptedIOException e) {
LOG.debug("flush on setBatch resulted in exception", e);
}
getReferenceStore().setBatch(messageId);
}
}

View File

@ -1,343 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.File;
import org.apache.activemq.kaha.impl.async.AsyncDataManager;
import org.apache.activemq.kaha.impl.index.hash.HashIndex;
import org.apache.activemq.store.PersistenceAdapter;
import org.apache.activemq.store.PersistenceAdapterFactory;
import org.apache.activemq.store.ReferenceStoreAdapter;
import org.apache.activemq.thread.TaskRunnerFactory;
import org.apache.activemq.util.IOHelper;
/**
* An implementation of {@link PersistenceAdapterFactory}
*
* @org.apache.xbean.XBean element="amqPersistenceAdapterFactory"
*
*
*/
public class AMQPersistenceAdapterFactory implements PersistenceAdapterFactory {

    static final int DEFAULT_MAX_REFERNCE_FILE_LENGTH = 2 * 1024 * 1024;

    // Journal / store configuration, applied to each adapter this factory creates.
    private File dataDirectory;
    private int journalThreadPriority = Thread.MAX_PRIORITY;
    private String brokerName = "localhost";
    private ReferenceStoreAdapter referenceStoreAdapter;
    private boolean syncOnWrite;
    private boolean syncOnTransaction = true;
    private boolean persistentIndex = true;
    private boolean useNio = true;
    private int maxFileLength = AsyncDataManager.DEFAULT_MAX_FILE_LENGTH;
    private long cleanupInterval = AsyncDataManager.DEFAULT_CLEANUP_INTERVAL;
    private int indexBinSize = HashIndex.DEFAULT_BIN_SIZE;
    private int indexKeySize = HashIndex.DEFAULT_KEY_SIZE;
    private int indexPageSize = HashIndex.DEFAULT_PAGE_SIZE;
    private int indexMaxBinSize = HashIndex.MAXIMUM_CAPACITY;
    private int indexLoadFactor = HashIndex.DEFAULT_LOAD_FACTOR;
    private int maxReferenceFileLength = DEFAULT_MAX_REFERNCE_FILE_LENGTH;
    private boolean recoverReferenceStore = true;
    private boolean forceRecoverReferenceStore = false;
    private long checkpointInterval = 1000 * 20;
    private boolean useDedicatedTaskRunner;
    private TaskRunnerFactory taskRunnerFactory;

    /**
     * Builds a new {@link AMQPersistenceAdapter} configured with this factory's current
     * property values.
     *
     * @return a AMQPersistenceAdapter
     * @see org.apache.activemq.store.PersistenceAdapterFactory#createPersistenceAdapter()
     */
    public PersistenceAdapter createPersistenceAdapter() {
        AMQPersistenceAdapter adapter = new AMQPersistenceAdapter();
        // General store placement and runtime wiring.
        adapter.setDirectory(getDataDirectory());
        adapter.setBrokerName(getBrokerName());
        adapter.setTaskRunnerFactory(getTaskRunnerFactory());
        adapter.setUseDedicatedTaskRunner(isUseDedicatedTaskRunner());
        adapter.setJournalThreadPriority(getJournalThreadPriority());
        // Journal behavior.
        adapter.setSyncOnWrite(isSyncOnWrite());
        adapter.setUseNio(isUseNio());
        adapter.setMaxFileLength(getMaxFileLength());
        adapter.setCleanupInterval(getCleanupInterval());
        adapter.setCheckpointInterval(getCheckpointInterval());
        // Reference store and index tuning.
        adapter.setPersistentIndex(isPersistentIndex());
        adapter.setReferenceStoreAdapter(getReferenceStoreAdapter());
        adapter.setIndexBinSize(getIndexBinSize());
        adapter.setIndexKeySize(getIndexKeySize());
        adapter.setIndexPageSize(getIndexPageSize());
        adapter.setIndexMaxBinSize(getIndexMaxBinSize());
        adapter.setIndexLoadFactor(getIndexLoadFactor());
        adapter.setMaxReferenceFileLength(getMaxReferenceFileLength());
        adapter.setForceRecoverReferenceStore(isForceRecoverReferenceStore());
        adapter.setRecoverReferenceStore(isRecoverReferenceStore());
        return adapter;
    }

    /** @return the journal cleanup interval in milliseconds */
    public long getCleanupInterval() {
        return cleanupInterval;
    }

    /** @param val the journal cleanup interval in milliseconds */
    public void setCleanupInterval(long val) {
        cleanupInterval = val;
    }

    /**
     * Returns the data directory, defaulting to a broker-name-derived subdirectory of the
     * standard data directory on first access.
     *
     * @return the dataDirectory
     */
    public File getDataDirectory() {
        if (this.dataDirectory == null) {
            this.dataDirectory = new File(IOHelper.getDefaultDataDirectory(), IOHelper.toFileSystemSafeName(brokerName));
        }
        return this.dataDirectory;
    }

    /** @param dataDirectory the dataDirectory to set */
    public void setDataDirectory(File dataDirectory) {
        this.dataDirectory = dataDirectory;
    }

    public boolean isUseDedicatedTaskRunner() {
        return useDedicatedTaskRunner;
    }

    public void setUseDedicatedTaskRunner(boolean useDedicatedTaskRunner) {
        this.useDedicatedTaskRunner = useDedicatedTaskRunner;
    }

    /** @return the taskRunnerFactory */
    public TaskRunnerFactory getTaskRunnerFactory() {
        return taskRunnerFactory;
    }

    /** @param taskRunnerFactory the taskRunnerFactory to set */
    public void setTaskRunnerFactory(TaskRunnerFactory taskRunnerFactory) {
        this.taskRunnerFactory = taskRunnerFactory;
    }

    /** @return the journalThreadPriority */
    public int getJournalThreadPriority() {
        return this.journalThreadPriority;
    }

    /** @param journalThreadPriority the journalThreadPriority to set */
    public void setJournalThreadPriority(int journalThreadPriority) {
        this.journalThreadPriority = journalThreadPriority;
    }

    /** @return the brokerName */
    public String getBrokerName() {
        return this.brokerName;
    }

    /** @param brokerName the brokerName to set */
    public void setBrokerName(String brokerName) {
        this.brokerName = brokerName;
    }

    /** @return the referenceStoreAdapter */
    public ReferenceStoreAdapter getReferenceStoreAdapter() {
        return this.referenceStoreAdapter;
    }

    /** @param referenceStoreAdapter the referenceStoreAdapter to set */
    public void setReferenceStoreAdapter(ReferenceStoreAdapter referenceStoreAdapter) {
        this.referenceStoreAdapter = referenceStoreAdapter;
    }

    public boolean isPersistentIndex() {
        return persistentIndex;
    }

    public void setPersistentIndex(boolean persistentIndex) {
        this.persistentIndex = persistentIndex;
    }

    public boolean isSyncOnWrite() {
        return syncOnWrite;
    }

    public void setSyncOnWrite(boolean syncOnWrite) {
        this.syncOnWrite = syncOnWrite;
    }

    // NOTE(review): syncOnTransaction is configurable here but is not propagated in
    // createPersistenceAdapter(); confirm whether that omission is intentional.
    public boolean isSyncOnTransaction() {
        return syncOnTransaction;
    }

    public void setSyncOnTransaction(boolean syncOnTransaction) {
        this.syncOnTransaction = syncOnTransaction;
    }

    public boolean isUseNio() {
        return useNio;
    }

    public void setUseNio(boolean useNio) {
        this.useNio = useNio;
    }

    public int getMaxFileLength() {
        return maxFileLength;
    }

    public void setMaxFileLength(int maxFileLength) {
        this.maxFileLength = maxFileLength;
    }

    /** @return the indexBinSize */
    public int getIndexBinSize() {
        return indexBinSize;
    }

    /** @param indexBinSize the indexBinSize to set */
    public void setIndexBinSize(int indexBinSize) {
        this.indexBinSize = indexBinSize;
    }

    /** @return the indexKeySize */
    public int getIndexKeySize() {
        return indexKeySize;
    }

    /** @param indexKeySize the indexKeySize to set */
    public void setIndexKeySize(int indexKeySize) {
        this.indexKeySize = indexKeySize;
    }

    /** @return the indexPageSize */
    public int getIndexPageSize() {
        return indexPageSize;
    }

    /** @param indexPageSize the indexPageSize to set */
    public void setIndexPageSize(int indexPageSize) {
        this.indexPageSize = indexPageSize;
    }

    /** @return the indexMaxBinSize */
    public int getIndexMaxBinSize() {
        return indexMaxBinSize;
    }

    /** @param indexMaxBinSize the indexMaxBinSize to set */
    public void setIndexMaxBinSize(int indexMaxBinSize) {
        this.indexMaxBinSize = indexMaxBinSize;
    }

    /** @return the indexLoadFactor */
    public int getIndexLoadFactor() {
        return indexLoadFactor;
    }

    /** @param indexLoadFactor the indexLoadFactor to set */
    public void setIndexLoadFactor(int indexLoadFactor) {
        this.indexLoadFactor = indexLoadFactor;
    }

    /** @return the maxReferenceFileLength */
    public int getMaxReferenceFileLength() {
        return maxReferenceFileLength;
    }

    /** @param maxReferenceFileLength the maxReferenceFileLength to set */
    public void setMaxReferenceFileLength(int maxReferenceFileLength) {
        this.maxReferenceFileLength = maxReferenceFileLength;
    }

    /** @return the recoverReferenceStore */
    public boolean isRecoverReferenceStore() {
        return recoverReferenceStore;
    }

    /** @param recoverReferenceStore the recoverReferenceStore to set */
    public void setRecoverReferenceStore(boolean recoverReferenceStore) {
        this.recoverReferenceStore = recoverReferenceStore;
    }

    /** @return the forceRecoverReferenceStore */
    public boolean isForceRecoverReferenceStore() {
        return forceRecoverReferenceStore;
    }

    /** @param forceRecoverReferenceStore the forceRecoverReferenceStore to set */
    public void setForceRecoverReferenceStore(boolean forceRecoverReferenceStore) {
        this.forceRecoverReferenceStore = forceRecoverReferenceStore;
    }

    /** @return the checkpointInterval in milliseconds */
    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    /** @param checkpointInterval the checkpointInterval to set */
    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }
}

View File

@ -1,265 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.IOException;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.command.ActiveMQTopic;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.MessageId;
import org.apache.activemq.command.SubscriptionInfo;
import org.apache.activemq.filter.BooleanExpression;
import org.apache.activemq.filter.MessageEvaluationContext;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.selector.SelectorParser;
import org.apache.activemq.store.MessageRecoveryListener;
import org.apache.activemq.store.TopicMessageStore;
import org.apache.activemq.store.TopicReferenceStore;
import org.apache.activemq.transaction.Synchronization;
import org.apache.activemq.util.IOExceptionSupport;
import org.apache.activemq.util.SubscriptionKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A MessageStore that uses a Journal to store it's messages.
*
*
*/
public class AMQTopicMessageStore extends AMQMessageStore implements TopicMessageStore {
private static final Logger LOG = LoggerFactory.getLogger(AMQTopicMessageStore.class);
private TopicReferenceStore topicReferenceStore;
public AMQTopicMessageStore(AMQPersistenceAdapter adapter,TopicReferenceStore topicReferenceStore, ActiveMQTopic destinationName) {
super(adapter, topicReferenceStore, destinationName);
this.topicReferenceStore = topicReferenceStore;
}
public void recoverSubscription(String clientId, String subscriptionName, MessageRecoveryListener listener) throws Exception {
flush();
topicReferenceStore.recoverSubscription(clientId, subscriptionName, new RecoveryListenerAdapter(this, listener));
}
public void recoverNextMessages(String clientId, String subscriptionName,
int maxReturned, final MessageRecoveryListener listener)
throws Exception {
RecoveryListenerAdapter recoveryListener = new RecoveryListenerAdapter(this, listener);
topicReferenceStore.recoverNextMessages(clientId, subscriptionName,maxReturned, recoveryListener);
if (recoveryListener.size() == 0) {
flush();
topicReferenceStore.recoverNextMessages(clientId,subscriptionName, maxReturned, recoveryListener);
}
}
public SubscriptionInfo lookupSubscription(String clientId, String subscriptionName) throws IOException {
return topicReferenceStore.lookupSubscription(clientId, subscriptionName);
}
public void addSubsciption(SubscriptionInfo subscriptionInfo, boolean retroactive) throws IOException {
peristenceAdapter.writeCommand(subscriptionInfo, false);
topicReferenceStore.addSubsciption(subscriptionInfo, retroactive);
}
/**
*/
public void acknowledge(final ConnectionContext context, final String clientId, final String subscriptionName,
final MessageId messageId, final MessageAck originalAck) throws IOException {
final boolean debug = LOG.isDebugEnabled();
JournalTopicAck ack = new JournalTopicAck();
ack.setDestination(destination);
ack.setMessageId(messageId);
ack.setMessageSequenceId(messageId.getBrokerSequenceId());
ack.setSubscritionName(subscriptionName);
ack.setClientId(clientId);
ack.setTransactionId(context.getTransaction() != null ? context.getTransaction().getTransactionId() : null);
final Location location = peristenceAdapter.writeCommand(ack, false);
final SubscriptionKey key = new SubscriptionKey(clientId, subscriptionName);
if (!context.isInTransaction()) {
if (debug) {
LOG.debug("Journalled acknowledge for: " + messageId + ", at: " + location);
}
acknowledge(context,messageId, location, clientId,subscriptionName);
} else {
if (debug) {
LOG.debug("Journalled transacted acknowledge for: " + messageId + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.add(location);
}finally {
lock.unlock();
}
transactionStore.acknowledge(this, ack, location);
context.getTransaction().addSynchronization(new Synchronization() {
public void afterCommit() throws Exception {
if (debug) {
LOG.debug("Transacted acknowledge commit for: " + messageId + ", at: " + location);
}
lock.lock();
try {
inFlightTxLocations.remove(location);
acknowledge(context,messageId, location, clientId,subscriptionName);
}finally {
lock.unlock();
}
}
public void afterRollback() throws Exception {
if (debug) {
LOG.debug("Transacted acknowledge rollback for: " + messageId + ", at: " + location);
}
lock.lock();
try{
inFlightTxLocations.remove(location);
}finally {
lock.unlock();
}
}
});
}
}
public boolean replayAcknowledge(ConnectionContext context, String clientId, String subscritionName, MessageId messageId) {
try {
SubscriptionInfo sub = topicReferenceStore.lookupSubscription(clientId, subscritionName);
if (sub != null) {
topicReferenceStore.acknowledge(context, clientId, subscritionName, messageId, null);
return true;
}
} catch (Throwable e) {
LOG.debug("Could not replay acknowledge for message '" + messageId + "'. Message may have already been acknowledged. reason: " + e);
}
return false;
}
/**
* @param messageId
* @param location
* @param key
* @throws IOException
*/
protected void acknowledge(final ConnectionContext context, MessageId messageId,
Location location, String clientId, String subscriptionName)
throws IOException {
MessageAck ack = null;
lock.lock();
try {
lastLocation = location;
}finally {
lock.unlock();
}
if (topicReferenceStore.acknowledgeReference(context, clientId,
subscriptionName, messageId)) {
ack = new MessageAck();
ack.setLastMessageId(messageId);
}
if (ack != null) {
removeMessage(context, ack);
}
}
/**
* @return Returns the longTermStore.
*/
public TopicReferenceStore getTopicReferenceStore() {
return topicReferenceStore;
}
public void deleteSubscription(String clientId, String subscriptionName) throws IOException {
topicReferenceStore.deleteSubscription(clientId, subscriptionName);
}
public SubscriptionInfo[] getAllSubscriptions() throws IOException {
return topicReferenceStore.getAllSubscriptions();
}
/**
 * Counts the messages outstanding for a durable subscription, applying the
 * subscription's selector when one is configured.
 *
 * @param clientId the durable subscriber's client id
 * @param subscriberName the durable subscription name
 * @return the number of (selector-matching) messages recoverable for the subscription
 * @throws IOException if the subscription cannot be recovered
 */
public int getMessageCount(String clientId, String subscriberName) throws IOException {
    // Push any buffered work to the reference store before counting.
    flush();
    SubscriptionInfo subscription = lookupSubscription(clientId, subscriberName);
    try {
        MessageCounter listener = new MessageCounter(subscription, this);
        topicReferenceStore.recoverSubscription(clientId, subscriberName, listener);
        return listener.count;
    } catch (Exception e) {
        throw IOExceptionSupport.create(e);
    }
}
/**
 * MessageRecoveryListener that counts the messages recovered for a durable
 * subscription, optionally filtering them through the subscription's selector.
 */
private class MessageCounter implements MessageRecoveryListener {
    // Number of (selector-matching) messages seen so far.
    int count = 0;
    SubscriptionInfo info;
    // Parsed selector, or null when the subscription has no selector.
    BooleanExpression selectorExpression;
    TopicMessageStore store;

    public MessageCounter(SubscriptionInfo info, TopicMessageStore store) throws Exception {
        this.info = info;
        if (info != null) {
            String selector = info.getSelector();
            if (selector != null) {
                this.selectorExpression = SelectorParser.parse(selector);
            }
        }
        this.store = store;
    }

    public boolean recoverMessageReference(MessageId ref) throws Exception {
        if (selectorExpression != null) {
            MessageEvaluationContext ctx = new MessageEvaluationContext();
            // Only a MessageId is available here, so the full message must be
            // fetched from the store before the selector can be evaluated.
            ctx.setMessageReference(store.getMessage(ref));
            if (selectorExpression.matches(ctx)) {
                count++;
            }
        } else {
            count++;
        }
        return true;
    }

    public boolean recoverMessage(Message message) throws Exception {
        if (selectorExpression != null) {
            MessageEvaluationContext ctx = new MessageEvaluationContext();
            // The recovered message is already in hand; evaluate the selector
            // against it directly instead of re-reading it from the store
            // (the previous store.getMessage(message.getMessageId()) round
            // trip was redundant).
            ctx.setMessageReference(message);
            if (selectorExpression.matches(ctx)) {
                count++;
            }
        } else {
            count++;
        }
        return true;
    }

    public boolean isDuplicate(MessageId ref) {
        // Duplicate suppression is not needed for counting.
        return false;
    }

    public boolean hasSpace() {
        // Counting never runs out of space; always accept more messages.
        return true;
    }
}
/**
 * Resets the batch cursor of the durable subscription in the long term
 * reference store so recovery starts from the beginning again.
 *
 * @param clientId the durable subscriber's client id
 * @param subscriptionName the durable subscription name
 */
public void resetBatching(String clientId, String subscriptionName) {
    this.topicReferenceStore.resetBatching(clientId, subscriptionName);
}
}

View File

@ -1,284 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import javax.transaction.xa.XAException;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.JournalTransaction;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.command.TransactionId;
import org.apache.activemq.command.XATransactionId;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.store.TransactionRecoveryListener;
import org.apache.activemq.store.TransactionStore;
/**
*/
/**
 * TransactionStore for the AMQ persistence adapter.  Operations performed
 * inside a transaction are buffered in memory as AMQTx entries and are only
 * applied to the long term store on commit; prepare/commit/rollback records
 * are written to the journal so XA transactions can be recovered on restart.
 */
public class AMQTransactionStore implements TransactionStore {

    /** Transactions with logged operations that are not yet prepared or completed. */
    protected Map<TransactionId, AMQTx> inflightTransactions = new LinkedHashMap<TransactionId, AMQTx>();
    /** XA transactions that have been prepared and await a commit or rollback. */
    Map<TransactionId, AMQTx> preparedTransactions = new LinkedHashMap<TransactionId, AMQTx>();

    private final AMQPersistenceAdapter persistenceAdapter;
    // True while recover() is replaying prepared transactions to a listener.
    private boolean doingRecover;

    public AMQTransactionStore(AMQPersistenceAdapter adapter) {
        this.persistenceAdapter = adapter;
    }

    /**
     * Moves the transaction from the in-flight map to the prepared map and
     * logs an XA_PREPARE record to the journal.
     *
     * @throws IOException
     * @see org.apache.activemq.store.TransactionStore#prepare(TransactionId)
     */
    public void prepare(TransactionId txid) throws IOException {
        AMQTx tx = null;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.remove(txid);
        }
        if (tx == null) {
            return;
        }
        persistenceAdapter.writeCommand(new JournalTransaction(JournalTransaction.XA_PREPARE, txid, false), true);
        synchronized (preparedTransactions) {
            preparedTransactions.put(txid, tx);
        }
    }

    /**
     * Same as prepare() but used during journal replay, so no new journal
     * record is written.
     *
     * @throws IOException
     * @see org.apache.activemq.store.TransactionStore#prepare(TransactionId)
     */
    public void replayPrepare(TransactionId txid) throws IOException {
        AMQTx tx = null;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.remove(txid);
        }
        if (tx == null) {
            return;
        }
        synchronized (preparedTransactions) {
            preparedTransactions.put(txid, tx);
        }
    }

    /**
     * Returns the in-flight AMQTx for the given id, creating one anchored at
     * the given journal location if this is the transaction's first operation.
     */
    public AMQTx getTx(TransactionId txid, Location location) {
        AMQTx tx = null;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.get(txid);
            if (tx == null) {
                tx = new AMQTx(location);
                inflightTransactions.put(txid, tx);
            }
        }
        return tx;
    }

    /**
     * Completes the transaction and logs a commit record to the journal.
     *
     * @throws XAException
     * @see org.apache.activemq.store.TransactionStore#commit(org.apache.activemq.service.Transaction)
     */
    public void commit(TransactionId txid, boolean wasPrepared, Runnable preCommit, Runnable postCommit) throws IOException {
        if (preCommit != null) {
            preCommit.run();
        }
        AMQTx tx;
        if (wasPrepared) {
            synchronized (preparedTransactions) {
                tx = preparedTransactions.remove(txid);
            }
        } else {
            synchronized (inflightTransactions) {
                tx = inflightTransactions.remove(txid);
            }
        }
        if (tx == null) {
            // Nothing was logged for this transaction; still honour the callback.
            if (postCommit != null) {
                postCommit.run();
            }
            return;
        }
        if (txid.isXATransaction()) {
            persistenceAdapter.writeCommand(new JournalTransaction(JournalTransaction.XA_COMMIT, txid, wasPrepared), true, true);
        } else {
            persistenceAdapter.writeCommand(new JournalTransaction(JournalTransaction.LOCAL_COMMIT, txid, wasPrepared), true, true);
        }
        if (postCommit != null) {
            postCommit.run();
        }
    }

    /**
     * Removes and returns the transaction during journal replay; no new
     * journal record is written.
     *
     * @throws XAException
     * @see org.apache.activemq.store.TransactionStore#commit(org.apache.activemq.service.Transaction)
     */
    public AMQTx replayCommit(TransactionId txid, boolean wasPrepared) throws IOException {
        if (wasPrepared) {
            synchronized (preparedTransactions) {
                return preparedTransactions.remove(txid);
            }
        } else {
            synchronized (inflightTransactions) {
                return inflightTransactions.remove(txid);
            }
        }
    }

    /**
     * Discards the transaction's buffered operations and logs a rollback
     * record to the journal.
     *
     * @throws IOException
     * @see org.apache.activemq.store.TransactionStore#rollback(TransactionId)
     */
    public void rollback(TransactionId txid) throws IOException {
        AMQTx tx = null;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.remove(txid);
        }
        if (tx == null) {
            // FIX: this previously checked (tx != null), which overwrote the
            // in-flight tx with the (necessarily absent) prepared entry and
            // skipped the prepared map when the tx was not in-flight — so no
            // rollback record was ever journaled.  A transaction lives in
            // exactly one of the two maps, so only fall through to the
            // prepared map when it was not found in-flight.
            synchronized (preparedTransactions) {
                tx = preparedTransactions.remove(txid);
            }
        }
        if (tx != null) {
            if (txid.isXATransaction()) {
                persistenceAdapter.writeCommand(new JournalTransaction(JournalTransaction.XA_ROLLBACK, txid, false), true, true);
            } else {
                persistenceAdapter.writeCommand(new JournalTransaction(JournalTransaction.LOCAL_ROLLBACK, txid, false), true, true);
            }
        }
    }

    /**
     * Rollback seen during journal replay: drop the transaction from
     * whichever map holds it, without writing a new record.
     *
     * @throws IOException
     * @see org.apache.activemq.store.TransactionStore#rollback(TransactionId)
     */
    public void replayRollback(TransactionId txid) throws IOException {
        boolean inflight = false;
        synchronized (inflightTransactions) {
            inflight = inflightTransactions.remove(txid) != null;
        }
        if (!inflight) {
            // FIX: was `if (inflight)` — a transaction is in exactly one map,
            // so the prepared map only needs checking when the id was NOT
            // found in-flight; otherwise replayed rollbacks of prepared
            // transactions would leave stale entries in preparedTransactions.
            synchronized (preparedTransactions) {
                preparedTransactions.remove(txid);
            }
        }
    }

    public void start() throws Exception {
        // No resources to initialise; transactions are buffered lazily.
    }

    public void stop() throws Exception {
        // Nothing to release.
    }

    /**
     * Replays all prepared transactions to the listener.  Transactions still
     * in-flight at recovery time are implicitly rolled back by clearing them.
     */
    public synchronized void recover(TransactionRecoveryListener listener) throws IOException {
        // All the in-flight transactions get rolled back..
        synchronized (inflightTransactions) {
            inflightTransactions.clear();
        }
        this.doingRecover = true;
        try {
            Map<TransactionId, AMQTx> txs = null;
            synchronized (preparedTransactions) {
                // Snapshot so the listener is not invoked while holding the lock.
                txs = new LinkedHashMap<TransactionId, AMQTx>(preparedTransactions);
            }
            for (Map.Entry<TransactionId, AMQTx> entry : txs.entrySet()) {
                AMQTx tx = entry.getValue();
                listener.recover((XATransactionId)entry.getKey(), tx.getMessages(), tx.getAcks());
            }
        } finally {
            this.doingRecover = false;
        }
    }

    /**
     * Buffers a transactional message add.
     *
     * @param message
     * @throws IOException
     */
    void addMessage(AMQMessageStore store, Message message, Location location) throws IOException {
        AMQTx tx = getTx(message.getTransactionId(), location);
        tx.add(store, message, location);
    }

    /**
     * Buffers a transactional message removal.
     *
     * @param ack
     * @throws IOException
     */
    public void removeMessage(AMQMessageStore store, MessageAck ack, Location location) throws IOException {
        AMQTx tx = getTx(ack.getTransactionId(), location);
        tx.add(store, ack);
    }

    /** Buffers a transactional durable-subscription acknowledgement. */
    public void acknowledge(AMQTopicMessageStore store, JournalTopicAck ack, Location location) {
        AMQTx tx = getTx(ack.getTransactionId(), location);
        tx.add(store, ack);
    }

    /**
     * Returns the earliest journal location still referenced by an active
     * (in-flight or prepared) transaction, or null when there is none.  The
     * journal can not roll over records at or after this location.
     */
    public Location checkpoint() throws IOException {
        // Nothing really to checkpoint.. since, we don't checkpoint tx
        // operations in to the long term store until they are committed.
        // We only report the first location of an operation associated with
        // an active tx.
        Location minimumLocationInUse = null;
        synchronized (inflightTransactions) {
            for (AMQTx tx : inflightTransactions.values()) {
                Location location = tx.getLocation();
                if (minimumLocationInUse == null || location.compareTo(minimumLocationInUse) < 0) {
                    minimumLocationInUse = location;
                }
            }
        }
        synchronized (preparedTransactions) {
            for (AMQTx tx : preparedTransactions.values()) {
                Location location = tx.getLocation();
                if (minimumLocationInUse == null || location.compareTo(minimumLocationInUse) < 0) {
                    minimumLocationInUse = location;
                }
            }
        }
        return minimumLocationInUse;
    }

    public boolean isDoingRecover() {
        return doingRecover;
    }

    /**
     * @return the preparedTransactions
     */
    public Map<TransactionId, AMQTx> getPreparedTransactions() {
        return this.preparedTransactions;
    }

    /**
     * @param preparedTransactions the preparedTransactions to set
     */
    public void setPreparedTransactions(Map<TransactionId, AMQTx> preparedTransactions) {
        if (preparedTransactions != null) {
            this.preparedTransactions.clear();
            this.preparedTransactions.putAll(preparedTransactions);
        }
    }
}

View File

@ -1,100 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.kaha.impl.async.Location;
/**
*/
/**
* Operations
*
*
*/
/**
 * The set of operations (message adds, removes and topic acknowledgements)
 * performed inside a single transaction, anchored at the journal location of
 * the transaction's first operation.
 */
public class AMQTx {

    private final Location location;
    private List<AMQTxOperation> operations = new ArrayList<AMQTxOperation>();

    public AMQTx(Location location) {
        this.location = location;
    }

    /** Records a transactional message add. */
    public void add(AMQMessageStore store, Message msg, Location location) {
        operations.add(new AMQTxOperation(AMQTxOperation.ADD_OPERATION_TYPE, store.getDestination(), msg, location));
    }

    /** Records a transactional message removal. */
    public void add(AMQMessageStore store, MessageAck ack) {
        operations.add(new AMQTxOperation(AMQTxOperation.REMOVE_OPERATION_TYPE, store.getDestination(), ack, null));
    }

    /** Records a transactional durable-subscription acknowledgement. */
    public void add(AMQTopicMessageStore store, JournalTopicAck ack) {
        operations.add(new AMQTxOperation(AMQTxOperation.ACK_OPERATION_TYPE, store.getDestination(), ack, null));
    }

    /**
     * @return the messages added by this transaction, in operation order
     */
    public Message[] getMessages() {
        List<Object> adds = new ArrayList<Object>();
        for (AMQTxOperation op : operations) {
            if (op.getOperationType() == AMQTxOperation.ADD_OPERATION_TYPE) {
                adds.add(op.getData());
            }
        }
        return adds.toArray(new Message[adds.size()]);
    }

    /**
     * @return the message removals performed by this transaction, in operation order
     */
    public MessageAck[] getAcks() {
        List<Object> removes = new ArrayList<Object>();
        for (AMQTxOperation op : operations) {
            if (op.getOperationType() == AMQTxOperation.REMOVE_OPERATION_TYPE) {
                removes.add(op.getData());
            }
        }
        return removes.toArray(new MessageAck[removes.size()]);
    }

    /**
     * @return the journal location of the transaction's first operation
     */
    public Location getLocation() {
        return this.location;
    }

    public List<AMQTxOperation> getOperations() {
        return operations;
    }

    public void setOperations(List<AMQTxOperation> operations) {
        this.operations = operations;
    }
}

View File

@ -1,134 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.store.amq;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.activemq.broker.ConnectionContext;
import org.apache.activemq.command.ActiveMQDestination;
import org.apache.activemq.command.JournalTopicAck;
import org.apache.activemq.command.Message;
import org.apache.activemq.command.MessageAck;
import org.apache.activemq.kaha.impl.async.Location;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.wireformat.WireFormat;
/**
*/
/**
 * A single operation (message add, message remove, or durable-subscription
 * acknowledgement) performed inside a transaction, together with the journal
 * location it was logged at.
 */
public class AMQTxOperation {

    public static final byte ADD_OPERATION_TYPE = 0;
    public static final byte REMOVE_OPERATION_TYPE = 1;
    public static final byte ACK_OPERATION_TYPE = 3;

    private byte operationType;
    private ActiveMQDestination destination;
    private Object data;
    private Location location;

    public AMQTxOperation() {
    }

    public AMQTxOperation(byte operationType, ActiveMQDestination destination, Object data, Location location) {
        this.operationType = operationType;
        this.destination = destination;
        this.data = data;
        this.location = location;
    }

    /**
     * @return the operation payload (a Message, MessageAck or JournalTopicAck)
     */
    public Object getData() {
        return this.data;
    }

    /**
     * @param data the payload to set
     */
    public void setData(Object data) {
        this.data = data;
    }

    /**
     * @return the journal location the operation was logged at
     */
    public Location getLocation() {
        return this.location;
    }

    /**
     * @param location the journal location to set
     */
    public void setLocation(Location location) {
        this.location = location;
    }

    /**
     * @return one of ADD_OPERATION_TYPE, REMOVE_OPERATION_TYPE or ACK_OPERATION_TYPE
     */
    public byte getOperationType() {
        return this.operationType;
    }

    /**
     * @param operationType the operation type to set
     */
    public void setOperationType(byte operationType) {
        this.operationType = operationType;
    }

    /**
     * Re-applies this operation against the destination's message store
     * during journal recovery.
     *
     * @param adapter the persistence adapter used to resolve the destination's store
     * @param context the broker connection context
     * @return true if the replayed operation had an effect
     * @throws IOException
     */
    public boolean replay(AMQPersistenceAdapter adapter, ConnectionContext context) throws IOException {
        AMQMessageStore store = (AMQMessageStore) adapter.createMessageStore(destination);
        switch (operationType) {
        case ADD_OPERATION_TYPE:
            return store.replayAddMessage(context, (Message) data, location);
        case REMOVE_OPERATION_TYPE:
            return store.replayRemoveMessage(context, (MessageAck) data);
        default:
            // ACK_OPERATION_TYPE: a durable-subscription acknowledgement.
            JournalTopicAck ack = (JournalTopicAck) data;
            return ((AMQTopicMessageStore) store).replayAcknowledge(context, ack.getClientId(),
                    ack.getSubscritionName(), ack.getMessageId());
        }
    }

    /**
     * Serializes this operation as: location, length-prefixed marshalled
     * payload, length-prefixed marshalled destination.
     */
    public void writeExternal(WireFormat wireFormat, DataOutput dos) throws IOException {
        location.writeExternal(dos);
        writeChunk(dos, wireFormat.marshal(getData()));
        writeChunk(dos, wireFormat.marshal(destination));
    }

    /**
     * Reads the form produced by writeExternal.
     */
    public void readExternal(WireFormat wireFormat, DataInput dis) throws IOException {
        this.location = new Location();
        this.location.readExternal(dis);
        setData(wireFormat.unmarshal(new ByteSequence(readChunk(dis))));
        this.destination = (ActiveMQDestination) wireFormat.unmarshal(new ByteSequence(readChunk(dis)));
    }

    // Writes a length-prefixed byte sequence.
    private static void writeChunk(DataOutput dos, ByteSequence packet) throws IOException {
        dos.writeInt(packet.length);
        dos.write(packet.data, packet.offset, packet.length);
    }

    // Reads a length-prefixed byte array written by writeChunk.
    private static byte[] readChunk(DataInput dis) throws IOException {
        byte[] chunk = new byte[dis.readInt()];
        dis.readFully(chunk);
        return chunk;
    }
}

Some files were not shown because too many files have changed in this diff Show More