git-svn-id: https://svn.apache.org/repos/asf/activemq/trunk@1241221 13f79535-47bb-0310-9956-ffa450edef68
Timothy A. Bish 2012-02-06 22:24:58 +00:00
parent a1d5ff0316
commit cdba931deb
12 changed files with 275 additions and 274 deletions

View File: MessageDatabase.java

@@ -55,7 +55,7 @@ public abstract class MessageDatabase extends ServiceSupport implements BrokerSe
     protected BrokerService brokerService;
     public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
-    public static final int LOG_SLOW_ACCESS_TIME = Integer.parseInt(System.getProperty(PROPERTY_LOG_SLOW_ACCESS_TIME, "0"));
+    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
     public static final File DEFAULT_DIRECTORY = new File("KahaDB");
     protected static final Buffer UNMATCHED;
     static {
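Note: this pattern recurs throughout the commit. Integer.getInteger(name, def) reads the same system property, but returns the default when the property is missing or malformed, where Integer.parseInt(System.getProperty(...)) throws NumberFormatException on a malformed value. A minimal sketch (the property name is illustrative only):

    // Throws NumberFormatException if the property holds a non-numeric value:
    int a = Integer.parseInt(System.getProperty("example.prop", "0"));
    // Falls back to 0 when the property is absent or malformed; also accepts
    // hex (0x...) and octal (0...) literals, since it parses via Integer.decode:
    int b = Integer.getInteger("example.prop", 0);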

View File: DataFileAccessorPool.java

@@ -95,8 +95,7 @@ public class DataFileAccessorPool {
     }
     synchronized void clearUsedMark() {
-        for (Iterator<Pool> iter = pools.values().iterator(); iter.hasNext();) {
-            Pool pool = iter.next();
+        for (Pool pool : pools.values()) {
             pool.clearUsedMark();
         }
     }
@@ -153,8 +152,7 @@ public class DataFileAccessorPool {
             return;
         }
         closed = true;
-        for (Iterator<Pool> iter = pools.values().iterator(); iter.hasNext();) {
-            Pool pool = iter.next();
+        for (Pool pool : pools.values()) {
             pool.dispose();
         }
         pools.clear();
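The enhanced for loop is a drop-in replacement in both methods above because neither loop body calls iter.remove(); for-each compiles to the same Iterator-based traversal:

    for (Pool pool : pools.values()) { // equivalent to the explicit Iterator form
        pool.dispose();
    }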

View File: DataFileAppender.java

@@ -28,20 +28,21 @@ import java.util.zip.Checksum;
 import org.apache.kahadb.util.ByteSequence;
 import org.apache.kahadb.util.DataByteArrayOutputStream;
 import org.apache.kahadb.util.LinkedNodeList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 /**
  * An optimized writer to do batch appends to a data file. This object is thread
  * safe and gains throughput as you increase the number of concurrent writes it
  * does.
  *
  *
  */
 class DataFileAppender implements FileAppender {
+    private static final Logger logger = LoggerFactory.getLogger(DataFileAppender.class);
     protected final Journal journal;
     protected final Map<Journal.WriteKey, Journal.WriteCommand> inflightWrites;
-    protected final Object enqueueMutex = new Object() {
-    };
+    protected final Object enqueueMutex = new Object();
     protected WriteBatch nextWriteBatch;
     protected boolean shutdown;
@@ -220,7 +221,8 @@ class DataFileAppender implements FileAppender {
                 final long start = System.currentTimeMillis();
                 enqueueMutex.wait();
                 if (maxStat > 0) {
-                    System.err.println("Waiting for write to finish with full batch... millis: " + (System.currentTimeMillis() - start));
+                    logger.info("Waiting for write to finish with full batch... millis: " +
+                            (System.currentTimeMillis() - start));
                 }
             }
         } catch (InterruptedException e) {
@@ -282,13 +284,11 @@ class DataFileAppender implements FileAppender {
             DataByteArrayOutputStream buff = new DataByteArrayOutputStream(maxWriteBatchSize);
             while (true) {
-                Object o = null;
                 // Block till we get a command.
                 synchronized (enqueueMutex) {
                     while (true) {
                         if (nextWriteBatch != null) {
-                            o = nextWriteBatch;
+                            wb = nextWriteBatch;
                             nextWriteBatch = null;
                             break;
                         }
@@ -300,7 +300,6 @@ class DataFileAppender implements FileAppender {
                     enqueueMutex.notifyAll();
                 }
-                wb = (WriteBatch) o;
                 if (dataFile != wb.dataFile) {
                     if (file != null) {
                         file.setLength(dataFile.getLength());
@@ -354,7 +353,7 @@ class DataFileAppender implements FileAppender {
                     for (; statIdx > 0;) {
                         all += stats[--statIdx];
                     }
-                    System.err.println("Ave writeSize: " + all / maxStat);
+                    logger.info("Ave writeSize: " + all / maxStat);
                 }
             }
             file.write(sequence.getData(), sequence.getOffset(), sequence.getLength());
@@ -411,7 +410,7 @@ class DataFileAppender implements FileAppender {
                 try {
                     write.onComplete.run();
                 } catch (Throwable e) {
-                    e.printStackTrace();
+                    logger.info("An exception was raised while executing the run command for onComplete", e);
                 }
             }
             write = write.getNext();
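Beyond routing output through SLF4J instead of System.err and printStackTrace(), the logger also supports parameterized messages, which avoid building the string when the level is disabled. A possible further step, not part of this commit:

    logger.info("Ave writeSize: {}", all / maxStat);
    logger.info("Waiting for write to finish with full batch... millis: {}",
            System.currentTimeMillis() - start);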

View File: Journal.java

@@ -77,7 +77,7 @@ public class Journal {
             sequence.compact();
             return sequence.getData();
         } catch (IOException e) {
-            throw new RuntimeException("Could not create batch control record header.");
+            throw new RuntimeException("Could not create batch control record header.", e);
         }
     }
@@ -121,7 +121,6 @@ public class Journal {
     protected boolean enableAsyncDiskSync = true;
     private Timer timer;
     public synchronized void start() throws IOException {
         if (started) {
             return;
@@ -141,9 +140,8 @@ public class Journal {
         });
         if (files != null) {
-            for (int i = 0; i < files.length; i++) {
+            for (File file : files) {
                 try {
-                    File file = files[i];
                     String n = file.getName();
                     String numStr = n.substring(filePrefix.length(), n.length() - fileSuffix.length());
                     int num = Integer.parseInt(numStr);
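Passing the caught exception as the second constructor argument, as the first hunk above now does, preserves the original stack trace for diagnosis; the message-only form discards it:

    throw new RuntimeException("Could not create batch control record header.");    // cause lost
    throw new RuntimeException("Could not create batch control record header.", e); // cause retained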

View File: Page.java

@@ -19,19 +19,9 @@ package org.apache.kahadb.page;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
 import org.apache.kahadb.util.ByteSequence;
 import org.apache.kahadb.util.DataByteArrayInputStream;
 import org.apache.kahadb.util.DataByteArrayOutputStream;
 import org.apache.kahadb.util.Marshaller;
 /**
  * A Page within a file.
  *
  *
  */
 public class Page<T> {
@@ -60,18 +50,17 @@ public class Page<T> {
         this.pageId = pageId;
     }
-    public void copy(Page<T> other) {
+    public Page<T> copy(Page<T> other) {
         this.pageId = other.pageId;
         this.txId = other.txId;
         this.type = other.type;
         this.next = other.next;
         this.data = other.data;
+        return this;
     }
     Page<T> copy() {
-        Page<T> rc = new Page<T>();
-        rc.copy(this);
-        return rc;
+        return new Page<T>().copy(this);
     }
     void makeFree(long txId) {
@@ -142,6 +131,4 @@ public class Page<T> {
     public void setChecksum(int checksum) {
         this.checksum = checksum;
     }
 }
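Returning this from copy(Page<T>) lets the no-argument copy() collapse into a single expression and gives callers a one-line way to duplicate a page. A sketch of the resulting usage:

    Page<String> original = new Page<String>(1);
    Page<String> duplicate = new Page<String>().copy(original); // copies pageId, txId, type, next, data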

View File: PageFile.java

@@ -60,10 +60,10 @@ public class PageFile {
     private static final String FREE_FILE_SUFFIX = ".free";
     // 4k Default page size.
-    public static final int DEFAULT_PAGE_SIZE = Integer.parseInt(System.getProperty("defaultPageSize", "" + 1024 * 4));
-    public static final int DEFAULT_WRITE_BATCH_SIZE = Integer.parseInt(System.getProperty("defaultWriteBatchSize", "" + 1000));
-    public static final int DEFAULT_PAGE_CACHE_SIZE = Integer.parseInt(System.getProperty("defaultPageCacheSize", "" + 100));
-    ;
+    public static final int DEFAULT_PAGE_SIZE = Integer.getInteger("defaultPageSize", 1024 * 4);
+    public static final int DEFAULT_WRITE_BATCH_SIZE = Integer.getInteger("defaultWriteBatchSize", 1000);
+    public static final int DEFAULT_PAGE_CACHE_SIZE = Integer.getInteger("defaultPageCacheSize", 100);;
     private static final int RECOVERY_FILE_HEADER_SIZE = 1024 * 4;
     private static final int PAGE_FILE_HEADER_SIZE = 1024 * 4;
@@ -198,14 +198,12 @@ public class PageFile {
         void begin() {
             if (currentLocation != -1) {
                 diskBoundLocation = currentLocation;
-                currentLocation = -1;
-                current = null;
             } else {
                 diskBound = current;
             }
+            current = null;
+            currentLocation = -1;
         }
     }
     /**
      * @return true if there are no pending writes to do.
@@ -219,7 +217,6 @@ public class PageFile {
         boolean isDone() {
             return diskBound == null && diskBoundLocation == -1 && current == null && currentLocation == -1;
         }
     }
     /**
@@ -336,12 +333,10 @@ public class PageFile {
      * @throws IOException
      */
     private void delete(File file) throws IOException {
-        if (file.exists()) {
-            if (!file.delete()) {
+        if (file.exists() && !file.delete()) {
             throw new IOException("Could not delete: " + file.getPath());
         }
-        }
     }
     private void archive(File file, String suffix) throws IOException {
         if (file.exists()) {
@@ -407,13 +402,12 @@ public class PageFile {
             // Scan all to find the free pages.
             freeList = new SequenceSet();
-            for (Iterator i = tx().iterator(true); i.hasNext(); ) {
-                Page page = (Page) i.next();
+            for (Iterator<Page> i = tx().iterator(true); i.hasNext(); ) {
+                Page page = i.next();
                 if (page.getType() == Page.PAGE_FREE_TYPE) {
                     freeList.add(page.getPageId());
                 }
             }
         }
         metaData.setCleanShutdown(false);
@@ -427,7 +421,7 @@ public class PageFile {
             startWriter();
         } else {
-            throw new IllegalStateException("Cannot load the page file when it is allready loaded.");
+            throw new IllegalStateException("Cannot load the page file when it is already loaded.");
         }
     }
@@ -516,7 +510,9 @@ public class PageFile {
             try {
                 checkpointLatch.await();
             } catch (InterruptedException e) {
-                throw new InterruptedIOException();
+                InterruptedIOException ioe = new InterruptedIOException();
+                ioe.initCause(e);
+                throw ioe;
             }
         }
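The three-line replacement is needed because InterruptedIOException has no constructor that accepts a cause, so the InterruptedException must be attached with initCause(); previously it was silently dropped:

    InterruptedIOException ioe = new InterruptedIOException();
    ioe.initCause(e); // keep the interrupting exception visible in stack traces
    throw ioe;

A further refinement, not made here, would be to restore the interrupt flag with Thread.currentThread().interrupt() before throwing.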
@@ -597,7 +593,7 @@ public class PageFile {
         ByteArrayOutputStream os = new ByteArrayOutputStream(PAGE_FILE_HEADER_SIZE);
         p.store(os, "");
         if (os.size() > PAGE_FILE_HEADER_SIZE / 2) {
-            throw new IOException("Configuration is to larger than: " + PAGE_FILE_HEADER_SIZE / 2);
+            throw new IOException("Configuration is larger than: " + PAGE_FILE_HEADER_SIZE / 2);
         }
         // Fill the rest with space...
         byte[] filler = new byte[(PAGE_FILE_HEADER_SIZE / 2) - os.size()];
@@ -773,7 +769,6 @@ public class PageFile {
     }
     public void setWriteBatchSize(int writeBatchSize) {
         assertNotLoaded();
         this.writeBatchSize = writeBatchSize;
     }
@@ -833,9 +828,14 @@ public class PageFile {
         Page<T> first = null;
         int c = count;
-        while (c > 0) {
-            Page<T> page = new Page<T>(nextFreePageId.getAndIncrement());
-            page.makeFree(getNextWriteTransactionId());
+        // Reserve the page ids and transaction ids only once, up front....
+        long pageId = nextFreePageId.getAndAdd(count);
+        long writeTxnId = nextTxid.getAndAdd(count);
+        while (c-- > 0) {
+            Page<T> page = new Page<T>(pageId++);
+            page.makeFree(writeTxnId++);
             if (first == null) {
                 first = page;
@@ -847,7 +847,6 @@ public class PageFile {
             write(page, out.getData());
             // LOG.debug("allocate writing: "+page.getPageId());
-            c--;
         }
         return first;
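Reserving the whole range with one getAndAdd(count) replaces count separate getAndIncrement() calls and guarantees the batch receives contiguous ids even when allocations race. A minimal sketch of the pattern, assuming AtomicLong counters as in PageFile:

    import java.util.concurrent.atomic.AtomicLong;

    AtomicLong nextFreePageId = new AtomicLong();
    int count = 10;
    long firstId = nextFreePageId.getAndAdd(count); // ids [firstId, firstId + count) are reserved
    for (long id = firstId; id < firstId + count; id++) {
        // build one page per reserved id
    }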
@@ -985,9 +984,6 @@ public class PageFile {
     // Internal Double write implementation follows...
     ///////////////////////////////////////////////////////////////////
-    /**
-     *
-     */
     private void pollWrites() {
         try {
             while (!stopWriter.get()) {
@@ -1007,7 +1003,7 @@ public class PageFile {
                     writeBatch();
                 }
             } catch (Throwable e) {
-                e.printStackTrace();
+                LOG.info("An exception was raised while performing poll writes", e);
             } finally {
                 releaseCheckpointWaiter();
             }

View File: Transaction.java

@@ -39,6 +39,8 @@ public class Transaction implements Iterable<Page> {
      * and its data is larger than what would fit into a single page.
      */
     public class PageOverflowIOException extends IOException {
+        private static final long serialVersionUID = 1L;
         public PageOverflowIOException(String message) {
             super(message);
         }
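Exceptions are Serializable, and without an explicit serialVersionUID the JVM derives one from the class structure, so an unrelated refactor can break deserialization of previously serialized instances. Declaring the field pins the version; the same guard is added to InvalidPageIOException below. An illustrative class, not from the codebase:

    public class ExampleIOException extends java.io.IOException {
        private static final long serialVersionUID = 1L; // explicit, stable across refactors
        public ExampleIOException(String message) {
            super(message);
        }
    }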
@@ -49,6 +51,8 @@ public class Transaction implements Iterable<Page> {
      * with an invalid page id.
      */
     public class InvalidPageIOException extends IOException {
+        private static final long serialVersionUID = 1L;
         private final long page;
         public InvalidPageIOException(String message, long page) {
@@ -92,7 +96,7 @@ public class Transaction implements Iterable<Page> {
     // List of pages freed in this transaction
     private final SequenceSet freeList = new SequenceSet();
-    private long maxTransactionSize = Long.parseLong(System.getProperty("maxKahaDBTxSize", "" + 10485760));
+    private long maxTransactionSize = Long.getLong("maxKahaDBTxSize", 10485760L);
     private long size = 0;
@@ -178,12 +182,14 @@ public class Transaction implements Iterable<Page> {
     public <T> void free(Page<T> page, int count) throws IOException {
         pageFile.assertLoaded();
         long offsetPage = page.getPageId();
-        for (int i = 0; i < count; i++) {
+        while (count-- > 0) {
             if (page == null) {
-                page = load(offsetPage + i, null);
+                page = load(offsetPage, null);
             }
             free(page);
             page = null;
+            // Advance the page id for the next iteration.
+            offsetPage++;
         }
     }
@@ -318,7 +324,6 @@ public class Transaction implements Iterable<Page> {
         }
-        @SuppressWarnings("unchecked")
         @Override
         public void close() throws IOException {
             super.close();
@@ -551,7 +556,6 @@ public class Transaction implements Iterable<Page> {
      * @throws IllegalStateException
      *         if the PageFile is not loaded
      */
-    @SuppressWarnings("unchecked")
     public Iterator<Page> iterator() {
         return (Iterator<Page>) iterator(false);
     }
@@ -569,6 +573,7 @@ public class Transaction implements Iterable<Page> {
         pageFile.assertLoaded();
         return new Iterator<Page>() {
             long nextId;
+            Page nextPage;
             Page lastPage;
@@ -699,7 +704,6 @@ public class Transaction implements Iterable<Page> {
     /**
      * Queues up a page write that should get done when commit() gets called.
      */
-    @SuppressWarnings("unchecked")
     private void write(final Page page, byte[] data) throws IOException {
         Long key = page.getPageId();
@@ -796,5 +800,4 @@ public class Transaction implements Iterable<Page> {
             }
         }
     }
 }

View File: IOHelper.java

@@ -18,19 +18,18 @@ package org.apache.kahadb.util;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.Stack;
 /**
  *
  */
 public final class IOHelper {
     protected static final int MAX_DIR_NAME_LENGTH;
     protected static final int MAX_FILE_NAME_LENGTH;
     private static final int DEFAULT_BUFFER_SIZE = 4096;
     private IOHelper() {
     }
@@ -105,7 +104,44 @@ public final class IOHelper {
         return result;
     }
-    public static boolean deleteFile(File fileToDelete) {
+    public static boolean delete(File top) {
+        boolean result = true;
+        Stack<File> files = new Stack<File>();
+        // Add file to the stack to be processed...
+        files.push(top);
+        // Process all files until none remain...
+        while (!files.isEmpty()) {
+            File file = files.pop();
+            if (file.isDirectory()) {
+                File list[] = file.listFiles();
+                if (list == null || list.length == 0) {
+                    // The current directory contains no entries...
+                    // delete directory and continue...
+                    result &= file.delete();
+                } else {
+                    // Add back the directory since it is not empty....
+                    // and when we process it again it will be empty and can be
+                    // deleted safely...
+                    files.push(file);
+                    for (File dirFile : list) {
+                        if (dirFile.isDirectory()) {
+                            // Place the directory on the stack...
+                            files.push(dirFile);
+                        } else {
+                            // This is a simple file, delete it...
+                            result &= dirFile.delete();
+                        }
+                    }
+                }
+            } else {
+                // This is a simple file, delete it...
+                result &= file.delete();
+            }
+        }
+        return result;
+    }
+    private static boolean deleteFile(File fileToDelete) {
         if (fileToDelete == null || !fileToDelete.exists()) {
             return true;
         }
@@ -114,7 +150,7 @@ public final class IOHelper {
         return result;
     }
-    public static boolean deleteChildren(File parent) {
+    private static boolean deleteChildren(File parent) {
         if (parent == null || !parent.exists()) {
             return false;
         }
@@ -142,7 +178,6 @@ public final class IOHelper {
         return result;
     }
     public static void moveFile(File src, File targetDirectory) throws IOException {
         if (!src.renameTo(new File(targetDirectory, src.getName()))) {
             throw new IOException("Failed to move " + src + " to " + targetDirectory);
@@ -167,11 +202,10 @@ public final class IOHelper {
     }
     static {
-        MAX_DIR_NAME_LENGTH = Integer.valueOf(System.getProperty("MaximumDirNameLength", "200")).intValue();
-        MAX_FILE_NAME_LENGTH = Integer.valueOf(System.getProperty("MaximumFileNameLength", "64")).intValue();
+        MAX_DIR_NAME_LENGTH = Integer.getInteger("MaximumDirNameLength", 200);
+        MAX_FILE_NAME_LENGTH = Integer.getInteger("MaximumFileNameLength", 64);
     }
     public static void mkdirs(File dir) throws IOException {
         if (dir.exists()) {
             if (!dir.isDirectory()) {
View File: LockFile.java

@@ -30,7 +30,7 @@ import java.util.Date;
  */
 public class LockFile {
-    private static final boolean DISABLE_FILE_LOCK = "true".equals(System.getProperty("java.nio.channels.FileLock.broken", "false"));
+    private static final boolean DISABLE_FILE_LOCK = Boolean.getBoolean("java.nio.channels.FileLock.broken");
     final private File file;
     private FileLock lock;
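Boolean.getBoolean(name) reads a system property; it is not a string parser. It returns true only when the property exists and equals "true" ignoring case, so the only behavioral difference from the removed expression is that mixed-case values like "TRUE" now also disable locking:

    System.setProperty("java.nio.channels.FileLock.broken", "true");
    boolean disabled = Boolean.getBoolean("java.nio.channels.FileLock.broken"); // true
    // Contrast: Boolean.parseBoolean("true") parses a literal string, not a property.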

View File: IndexBenchmark.java

@@ -49,8 +49,7 @@ public abstract class IndexBenchmark extends TestCase {
     public void setUp() throws Exception {
         ROOT_DIR = new File(IOHelper.getDefaultDataDirectory());
         IOHelper.mkdirs(ROOT_DIR);
-        IOHelper.deleteChildren(ROOT_DIR);
+        IOHelper.delete(ROOT_DIR);
         pf = new PageFile(ROOT_DIR, getClass().getName());
         pf.load();
@@ -58,7 +57,7 @@ public abstract class IndexBenchmark extends TestCase {
     protected void tearDown() throws Exception {
         Transaction tx = pf.tx();
-        for (Index i : indexes.values()) {
+        for (Index<?, ?> i : indexes.values()) {
             try {
                 i.unload(tx);
             } catch (Throwable ignore) {

View File: IndexTestSupport.java

@@ -43,9 +43,7 @@ public abstract class IndexTestSupport extends TestCase {
     protected void setUp() throws Exception {
         super.setUp();
         directory = new File(IOHelper.getDefaultDataDirectory());
-        IOHelper.mkdirs(directory);
-        IOHelper.deleteChildren(directory);
+        IOHelper.delete(directory);
     }
     protected void tearDown() throws Exception {

View File: JournalTest.java

@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import junit.framework.TestCase;
 import org.apache.kahadb.journal.Journal;
 import org.apache.kahadb.util.ByteSequence;
+import org.apache.kahadb.util.IOHelper;
 public class JournalTest extends TestCase {
     protected static final int DEFAULT_MAX_BATCH_SIZE = 1024 * 1024 * 4;
@@ -46,19 +47,7 @@ public class JournalTest extends TestCase {
     @Override
     public void tearDown() throws Exception {
         dataManager.close();
-        deleteFilesInDirectory(dir);
-        dir.delete();
-    }
-    private void deleteFilesInDirectory(File directory) {
-        File[] files = directory.listFiles();
-        for (int i = 0; i < files.length; i++) {
-            File f = files[i];
-            if (f.isDirectory()) {
-                deleteFilesInDirectory(f);
-            }
-            f.delete();
-        }
+        IOHelper.delete(dir);
     }
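Delegating to IOHelper.delete also fixes a latent NullPointerException: the removed helper dereferenced directory.listFiles() without a null check, and listFiles() returns null, not an empty array, when the path does not exist or is not a directory:

    File[] files = new File("no/such/dir").listFiles(); // null, not an empty array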
public void testBatchWriteCallbackCompleteAfterTimeout() throws Exception {