mirror of https://github.com/apache/activemq.git
Updated fix for this issue: allows enabling non-forced metadata updates to the file channel via FileChannel#force(false); enable this by defining "org.apache.activemq.kahaDB.files.skipMetadataUpdate=true"
This commit is contained in:
parent efb988655c
commit ef619b6a9b
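For context: FileChannel#force(boolean metaData) flushes a channel's dirty pages to the device; with metaData=true it also forces file metadata such as length and modification time, which can cost an extra device write per call. A small sketch of the semantics this commit exposes (illustrative only, not code from the commit):

    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;

    public class ForceSemantics {
        public static void main(String[] args) throws Exception {
            // The commit reads this property into a flag; it defaults to false,
            // so metadata is still forced unless explicitly skipped.
            boolean skipMetadataUpdate =
                Boolean.getBoolean("org.apache.activemq.kahaDB.files.skipMetadataUpdate");

            try (RandomAccessFile raf = new RandomAccessFile("journal.dat", "rw")) {
                FileChannel channel = raf.getChannel();
                raf.write(new byte[4096]);
                // force(true): flush file data AND metadata (length, timestamps).
                // force(false): flush file data only, often one fewer device write.
                channel.force(!skipMetadataUpdate);
            }
        }
    }

Run with -Dorg.apache.activemq.kahaDB.files.skipMetadataUpdate=true to select the force(false) path.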
CallerBufferingDataFileAppender.java
@@ -17,11 +17,11 @@
 package org.apache.activemq.store.kahadb.disk.journal;
 
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.util.zip.Adler32;
 import java.util.zip.Checksum;
-import org.apache.activemq.util.ByteSequence;
 
 import org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream;
+import org.apache.activemq.util.ByteSequence;
+import org.apache.activemq.util.RecoverableRandomAccessFile;
 
 /**
@@ -49,6 +49,7 @@ class CallerBufferingDataFileAppender extends DataFileAppender {
             append(write);
         }
 
+        @Override
         public void append(Journal.WriteCommand write) throws IOException {
             super.append(write);
             forceToDisk |= appendToBuffer(write, buff);
@@ -155,7 +156,7 @@ class CallerBufferingDataFileAppender extends DataFileAppender {
             }
 
             if (forceToDisk) {
-                file.getFD().sync();
+                file.sync();
             }
 
             Journal.WriteCommand lastWrite = wb.writes.getTail();
DataFileAccessor.java
@@ -17,7 +17,6 @@
 package org.apache.activemq.store.kahadb.disk.journal;
 
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.util.Map;
 
 import org.apache.activemq.util.ByteSequence;
@@ -70,7 +69,7 @@ final class DataFileAccessor {
             throw new IOException("Invalid location: " + location);
         }
 
-        Journal.WriteCommand asyncWrite = (Journal.WriteCommand)inflightWrites.get(new Journal.WriteKey(location));
+        Journal.WriteCommand asyncWrite = inflightWrites.get(new Journal.WriteKey(location));
         if (asyncWrite != null) {
             return asyncWrite.data;
         }
@@ -105,7 +104,7 @@ final class DataFileAccessor {
     }
 
     public void readLocationDetails(Location location) throws IOException {
-        Journal.WriteCommand asyncWrite = (Journal.WriteCommand)inflightWrites.get(new Journal.WriteKey(location));
+        Journal.WriteCommand asyncWrite = inflightWrites.get(new Journal.WriteKey(location));
         if (asyncWrite != null) {
             location.setSize(asyncWrite.location.getSize());
             location.setType(asyncWrite.location.getType());
@@ -155,9 +154,7 @@ final class DataFileAccessor {
         int size = Math.min(data.getLength(), location.getSize());
         file.write(data.getData(), data.getOffset(), size);
         if (sync) {
-            file.getFD().sync();
+            file.sync();
         }
-
     }
-
 }
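The two hunks above also drop a now-redundant cast: once inflightWrites is declared with generic type parameters (its declaration is not shown in this diff), Map#get already returns Journal.WriteCommand. A minimal self-contained illustration of the difference:

    import java.util.HashMap;
    import java.util.Map;

    public class CastDemo {
        public static void main(String[] args) {
            // Raw-typed map: get() returns Object, so callers needed the
            // explicit cast the diff removes.
            Map rawMap = new HashMap();
            rawMap.put("key", Integer.valueOf(42));
            Integer fromRaw = (Integer) rawMap.get("key");

            // Generic map: get() is already typed, so no cast is required.
            Map<String, Integer> typedMap = new HashMap<>();
            typedMap.put("key", 42);
            Integer fromTyped = typedMap.get("key");

            System.out.println(fromRaw + " " + fromTyped);
        }
    }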
DataFileAppender.java
@@ -18,16 +18,15 @@ package org.apache.activemq.store.kahadb.disk.journal;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.io.RandomAccessFile;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.zip.Adler32;
 import java.util.zip.Checksum;
 
-import org.apache.activemq.util.ByteSequence;
 import org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream;
 import org.apache.activemq.store.kahadb.disk.util.LinkedNodeList;
+import org.apache.activemq.util.ByteSequence;
+import org.apache.activemq.util.RecoverableRandomAccessFile;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,10 +66,12 @@ class DataFileAppender implements FileAppender {
             hash = (int)(file ^ offset);
         }
 
+        @Override
         public int hashCode() {
             return hash;
         }
 
+        @Override
         public boolean equals(Object obj) {
             if (obj instanceof WriteKey) {
                 WriteKey di = (WriteKey)obj;
@@ -132,6 +133,7 @@ class DataFileAppender implements FileAppender {
         this.syncOnComplete = this.journal.isEnableAsyncDiskSync();
     }
 
+    @Override
     public Location storeItem(ByteSequence data, byte type, boolean sync) throws IOException {
 
         // Write the packet our internal buffer.
@@ -160,6 +162,7 @@ class DataFileAppender implements FileAppender {
         return location;
     }
 
+    @Override
     public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException {
         // Write the packet our internal buffer.
         int size = data.getLength() + Journal.RECORD_HEAD_SPACE;
@@ -185,6 +188,7 @@ class DataFileAppender implements FileAppender {
         if (!running) {
             running = true;
             thread = new Thread() {
+                @Override
                 public void run() {
                     processQueue();
                 }
@@ -246,6 +250,7 @@ class DataFileAppender implements FileAppender {
         return new WriteBatch(file, file.getLength(), write);
     }
 
+    @Override
     public void close() throws IOException {
         synchronized (enqueueMutex) {
             if (!shutdown) {
@@ -365,7 +370,7 @@ class DataFileAppender implements FileAppender {
                 }
 
                 if (forceToDisk) {
-                    file.getFD().sync();
+                    file.sync();
                 }
 
                 Journal.WriteCommand lastWrite = wb.writes.getTail();
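Throughout DataFileAppender, a batch of writes shares a single force: each write ORs its sync requirement into forceToDisk, and the whole batch is forced at most once. A minimal sketch of that pattern (hypothetical names, not the class itself):

    import java.io.RandomAccessFile;
    import java.util.ArrayList;
    import java.util.List;

    public class BatchSketch {
        record Write(byte[] data, boolean sync) {}

        public static void main(String[] args) throws Exception {
            List<Write> batch = new ArrayList<>();
            batch.add(new Write(new byte[128], false));
            batch.add(new Write(new byte[256], true)); // one sync request taints the batch

            boolean forceToDisk = false;
            try (RandomAccessFile file = new RandomAccessFile("batch.dat", "rw")) {
                for (Write w : batch) {
                    file.write(w.data());
                    forceToDisk |= w.sync();
                }
                if (forceToDisk) {
                    // One force for the whole batch, the pattern the diff keeps
                    // while routing the call through file.sync().
                    file.getChannel().force(false);
                }
            }
        }
    }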
PageFile.java
@@ -42,9 +42,15 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.Adler32;
 import java.util.zip.Checksum;
 
-import org.apache.activemq.util.*;
 import org.apache.activemq.store.kahadb.disk.util.Sequence;
 import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
+import org.apache.activemq.util.DataByteArrayOutputStream;
+import org.apache.activemq.util.IOExceptionSupport;
+import org.apache.activemq.util.IOHelper;
+import org.apache.activemq.util.IntrospectionSupport;
+import org.apache.activemq.util.LFUCache;
+import org.apache.activemq.util.LRUCache;
+import org.apache.activemq.util.RecoverableRandomAccessFile;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -75,7 +81,7 @@ public class PageFile {
     private static final Logger LOG = LoggerFactory.getLogger(PageFile.class);
 
     // A PageFile will use a couple of files in this directory
-    private File directory;
+    private final File directory;
     // And the file names in that directory will be based on this name.
     private final String name;
 
@@ -97,7 +103,7 @@ public class PageFile {
     // The number of pages in the current recovery buffer
     private int recoveryPageCount;
 
-    private AtomicBoolean loaded = new AtomicBoolean();
+    private final AtomicBoolean loaded = new AtomicBoolean();
     // The number of pages we are aiming to write every time we
     // write to disk.
     int writeBatchSize = DEFAULT_WRITE_BATCH_SIZE;
@@ -118,23 +124,23 @@ public class PageFile {
     private boolean enabledWriteThread = false;
 
     // These are used if enableAsyncWrites==true
-    private AtomicBoolean stopWriter = new AtomicBoolean();
+    private final AtomicBoolean stopWriter = new AtomicBoolean();
     private Thread writerThread;
     private CountDownLatch checkpointLatch;
 
     // Keeps track of writes that are being written to disk.
-    private TreeMap<Long, PageWrite> writes = new TreeMap<Long, PageWrite>();
+    private final TreeMap<Long, PageWrite> writes = new TreeMap<Long, PageWrite>();
 
     // Keeps track of free pages.
     private final AtomicLong nextFreePageId = new AtomicLong();
     private SequenceSet freeList = new SequenceSet();
 
-    private AtomicLong nextTxid = new AtomicLong();
+    private final AtomicLong nextTxid = new AtomicLong();
 
     // Persistent settings stored in the page file.
     private MetaData metaData;
 
-    private ArrayList<File> tmpFilesForRemoval = new ArrayList<File>();
+    private final ArrayList<File> tmpFilesForRemoval = new ArrayList<File>();
 
     private boolean useLFRUEviction = false;
     private float LFUEvictionFactor = 0.2f;
@@ -521,6 +527,7 @@ public class PageFile {
     }
 
 
+    @Override
     public String toString() {
         return "Page File: " + getMainPageFile();
     }
@@ -610,10 +617,10 @@ public class PageFile {
         // So we don't loose it.. write it 2 times...
         writeFile.seek(0);
         writeFile.write(d);
-        writeFile.getFD().sync();
+        writeFile.sync();
         writeFile.seek(PAGE_FILE_HEADER_SIZE / 2);
         writeFile.write(d);
-        writeFile.getFD().sync();
+        writeFile.sync();
     }
 
     private void storeFreeList() throws IOException {
@@ -880,14 +887,17 @@ public class PageFile {
     private <T> void write(Page<T> page, byte[] data) throws IOException {
         final PageWrite write = new PageWrite(page, data);
         Entry<Long, PageWrite> entry = new Entry<Long, PageWrite>() {
+            @Override
             public Long getKey() {
                 return write.getPage().getPageId();
             }
 
+            @Override
             public PageWrite getValue() {
                 return write;
             }
 
+            @Override
             public PageWrite setValue(PageWrite value) {
                 return null;
             }
@@ -1081,9 +1091,9 @@ public class PageFile {
             if (enableDiskSyncs) {
                 // Sync to make sure recovery buffer writes land on disk..
                 if (enableRecoveryFile) {
-                    recoveryFile.getFD().sync();
+                    writeFile.sync();
                 }
-                writeFile.getFD().sync();
+                writeFile.sync();
             }
         } finally {
             synchronized (writes) {
@@ -1185,7 +1195,7 @@ public class PageFile {
         }
 
         // And sync it to disk
-        writeFile.getFD().sync();
+        writeFile.sync();
        return nextTxId;
     }
 
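The writeMetaData hunk above preserves PageFile's torn-write protection: the header is written at two fixed offsets so a crash mid-write leaves at least one intact copy. A self-contained sketch of that double-write idea (the PAGE_FILE_HEADER_SIZE value is assumed for illustration):

    import java.io.RandomAccessFile;
    import java.util.Arrays;

    public class DoubleWriteSketch {
        static final int PAGE_FILE_HEADER_SIZE = 4096; // assumed constant

        public static void main(String[] args) throws Exception {
            byte[] header = "metadata".getBytes();
            try (RandomAccessFile f = new RandomAccessFile("page.db", "rw")) {
                f.seek(0);
                f.write(header);
                f.getChannel().force(false); // first copy durable before the second
                f.seek(PAGE_FILE_HEADER_SIZE / 2);
                f.write(header);
                f.getChannel().force(false);

                // Recovery: prefer copy 1; if it is corrupt (e.g. a checksum
                // fails), fall back to copy 2 at the second offset.
                byte[] copy1 = new byte[header.length];
                f.seek(0);
                f.readFully(copy1);
                System.out.println(Arrays.equals(copy1, header));
            }
        }
    }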
DiskBenchmark.java
@@ -27,6 +27,9 @@ import java.util.Arrays;
  */
 public class DiskBenchmark {
 
+    private static final boolean SKIP_METADATA_UPDATE =
+        Boolean.getBoolean("org.apache.activemq.file.skipMetadataUpdate");
+
     boolean verbose;
     // reads and writes work with 4k of data at a time.
     int bs = 1024 * 4;
@@ -83,23 +86,12 @@ public class DiskBenchmark {
 
     @Override
     public String toString() {
-        return
-        "Writes: \n" +
-        " "+writes+" writes of size "+size+" written in "+(writeDuration/1000.0)+" seconds.\n"+
-        " "+getWriteRate()+" writes/second.\n"+
-        " "+getWriteSizeRate()+" megs/second.\n"+
-        "\n"+
-        "Sync Writes: \n" +
-        " "+syncWrites+" writes of size "+size+" written in "+(syncWriteDuration/1000.0)+" seconds.\n"+
-        " "+getSyncWriteRate()+" writes/second.\n"+
-        " "+getSyncWriteSizeRate()+" megs/second.\n"+
-        "\n"+
-        "Reads: \n" +
-        " "+reads+" reads of size "+size+" read in "+(readDuration/1000.0)+" seconds.\n"+
-        " "+getReadRate()+" writes/second.\n"+
-        " "+getReadSizeRate()+" megs/second.\n"+
-        "\n"+
-        "";
+        return "Writes: \n" + " " + writes + " writes of size " + size + " written in " + (writeDuration / 1000.0) + " seconds.\n" + " " + getWriteRate()
+            + " writes/second.\n" + " " + getWriteSizeRate() + " megs/second.\n" + "\n" + "Sync Writes: \n" + " " + syncWrites + " writes of size "
+            + size + " written in " + (syncWriteDuration / 1000.0) + " seconds.\n" + " " + getSyncWriteRate() + " writes/second.\n" + " "
+            + getSyncWriteSizeRate() + " megs/second.\n" + "\n" + "Reads: \n" + " " + reads + " reads of size " + size + " read in "
+            + (readDuration / 1000.0) + " seconds.\n" + " " + getReadRate() + " writes/second.\n" + " " + getReadSizeRate() + " megs/second.\n" + "\n"
+            + "";
     }
 
     private float getWriteSizeRate() {
@@ -129,6 +121,7 @@ public class DiskBenchmark {
         rc /= (syncWriteDuration / 1000.0); // get rate.
         return rc;
     }
+
     private float getReadSizeRate() {
         float rc = reads;
         rc *= size;
@@ -200,7 +193,6 @@ public class DiskBenchmark {
         }
     }
 
-
     public Report benchmark(File file) throws IOException {
         Report rc = new Report();
 
@@ -231,11 +223,11 @@ public class DiskBenchmark {
                     break;
                 }
             }
-            // Sync to disk so that the we actually write the data to disk.. otherwise
-            // OS buffering might not really do the write.
-            raf.getFD().sync();
+            // Sync to disk so that the we actually write the data to disk..
+            // otherwise OS buffering might not really do the write.
+            raf.getChannel().force(!SKIP_METADATA_UPDATE);
         }
-        raf.getFD().sync();
+        raf.getChannel().force(!SKIP_METADATA_UPDATE);
         raf.close();
         now = System.currentTimeMillis();
@@ -254,7 +246,7 @@ public class DiskBenchmark {
         for (long i = 0; i + data.length < size; i += data.length) {
             raf.seek(i);
             raf.write(data);
-            raf.getFD().sync();
+            raf.getChannel().force(false);
             ioCount++;
             now = System.currentTimeMillis();
             if ((now - start) > sampleInterval) {
@@ -293,44 +285,35 @@ public class DiskBenchmark {
         return rc;
     }
 
-
     public boolean isVerbose() {
         return verbose;
     }
 
-
     public void setVerbose(boolean verbose) {
         this.verbose = verbose;
     }
 
-
     public int getBs() {
         return bs;
     }
 
-
     public void setBs(int bs) {
         this.bs = bs;
     }
 
-
     public long getSize() {
         return size;
     }
 
-
     public void setSize(long size) {
         this.size = size;
     }
 
-
     public long getSampleInterval() {
         return sampleInterval;
     }
 
-
     public void setSampleInterval(long sampleInterval) {
         this.sampleInterval = sampleInterval;
     }
-
 }
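To see what the commit buys, a standalone micro-benchmark (not DiskBenchmark itself) can compare force(true) against force(false); absolute numbers vary widely by OS, filesystem, and disk:

    import java.io.File;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;

    public class ForceCost {
        public static void main(String[] args) throws Exception {
            File f = File.createTempFile("force-cost", ".dat");
            byte[] block = new byte[4 * 1024];
            for (boolean metaData : new boolean[] {true, false}) {
                try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
                    FileChannel ch = raf.getChannel();
                    long start = System.currentTimeMillis();
                    for (int i = 0; i < 500; i++) {
                        raf.seek(0);
                        raf.write(block);
                        // metaData=true also forces length/timestamps each pass.
                        ch.force(metaData);
                    }
                    System.out.println("force(" + metaData + "): "
                            + (System.currentTimeMillis() - start) + " ms");
                }
            }
            f.delete();
        }
    }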
RecoverableRandomAccessFile.java
@@ -16,10 +16,18 @@
  */
 package org.apache.activemq.util;
 
-import java.io.*;
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
 
 public class RecoverableRandomAccessFile implements java.io.DataOutput, java.io.DataInput, java.io.Closeable {
 
+    private static final boolean SKIP_METADATA_UPDATE =
+        Boolean.getBoolean("org.apache.activemq.kahaDB.files.skipMetadataUpdate");
+
     RandomAccessFile raf;
     File file;
     String mode;
@@ -389,6 +397,24 @@ public class RecoverableRandomAccessFile implements java.io.DataOutput, java.io.DataInput, java.io.Closeable {
         }
     }
 
+    public void sync() throws IOException {
+        try {
+            getRaf().getChannel().force(!SKIP_METADATA_UPDATE);
+        } catch (IOException ioe) {
+            handleException();
+            throw ioe;
+        }
+    }
+
+    public FileChannel getChannel() throws IOException {
+        try {
+            return getRaf().getChannel();
+        } catch (IOException ioe) {
+            handleException();
+            throw ioe;
+        }
+    }
+
     public int read(byte[] b, int off, int len) throws IOException {
         try {
             return getRaf().read(b, off, len);
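Because SKIP_METADATA_UPDATE is read once into a static final via Boolean.getBoolean, the property must be set before this class is loaded, normally as a JVM flag: java -Dorg.apache.activemq.kahaDB.files.skipMetadataUpdate=true. A usage sketch (the File-and-mode constructor is assumed here to mirror RandomAccessFile's):

    import java.io.File;
    import org.apache.activemq.util.RecoverableRandomAccessFile;

    public class SyncFlagDemo {
        public static void main(String[] args) throws Exception {
            // Must happen before RecoverableRandomAccessFile is first loaded;
            // afterwards the static final has already been initialized.
            System.setProperty("org.apache.activemq.kahaDB.files.skipMetadataUpdate", "true");

            RecoverableRandomAccessFile file =
                    new RecoverableRandomAccessFile(new File("journal.dat"), "rw"); // constructor assumed
            file.write(new byte[64]);
            file.sync();  // with the flag set: FileChannel.force(false), data only
            file.close();
        }
    }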