https://issues.apache.org/jira/browse/AMQ-3374 - don't count every write toward the transaction size; only the number of pages we keep in memory is relevant (multiple writes to the same page simply overwrite each other)

git-svn-id: https://svn.apache.org/repos/asf/activemq/trunk@1224890 13f79535-47bb-0310-9956-ffa450edef68
Bosanac Dejan 2011-12-27 10:41:28 +00:00
parent 9a9fccbea4
commit 4a7475d7c2
1 changed file with 9 additions and 4 deletions
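
To make the reasoning behind the change concrete, here is a minimal, self-contained Java sketch. It is not the real KahaDB Transaction class; SketchTransaction, PAGE_SIZE and its fields are made up for illustration, but it mirrors the relevant behaviour: buffered writes are keyed by page id, so writing the same page repeatedly replaces the earlier buffer instead of growing the transaction's memory footprint.

import java.util.TreeMap;

// Illustrative only: shows why summing data.length for every write overstates
// the in-memory size of a transaction when pages are rewritten.
public class SketchTransaction {

    static final int PAGE_SIZE = 4 * 1024;                 // assumed page size

    // buffered page writes, keyed by page id (mirrors the idea of the writes map)
    final TreeMap<Long, byte[]> writes = new TreeMap<Long, byte[]>();

    long naiveSize = 0; // old accounting: every call to write() adds data.length

    void write(long pageId, byte[] data) {
        naiveSize += data.length;   // pre-AMQ-3374 style counting
        writes.put(pageId, data);   // same page id: the old buffer is replaced
    }

    // post-AMQ-3374 style counting: only distinct pages held in memory matter
    long size() {
        return (long) writes.size() * PAGE_SIZE;
    }

    public static void main(String[] args) {
        SketchTransaction tx = new SketchTransaction();
        byte[] page = new byte[PAGE_SIZE];
        for (int i = 0; i < 1000; i++) {
            tx.write(42L, page);    // 1000 writes, all to the same page
        }
        System.out.println("naive size: " + tx.naiveSize);   // ~4 MB
        System.out.println("page-based size: " + tx.size()); // one page: 4096
    }
}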


@@ -16,12 +16,14 @@
  */
 package org.apache.kahadb.page;
-import java.io.*;
-import java.util.*;
 import org.apache.kahadb.page.PageFile.PageWrite;
 import org.apache.kahadb.util.*;
+import java.io.*;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.TreeMap;
 /**
  * The class used to read/update a PageFile object. Using a transaction allows you to
  * do multiple update operations in a single unit of work.
@@ -700,9 +702,12 @@ public class Transaction implements Iterable<Page> {
     @SuppressWarnings("unchecked")
     private void write(final Page page, byte[] data) throws IOException {
         Long key = page.getPageId();
-        size += data.length;
+
+        // how many pages we have for this transaction
+        size = writes.size() * pageFile.getPageSize();
         PageWrite write;
         if (size > maxTransactionSize) {
             if (tmpFile == null) {
                 tmpFile = new RandomAccessFile(getTempFile(), "rw");
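
The recomputed size feeds the spill-to-disk check visible at the end of the hunk above. Below is a hedged sketch of that threshold logic only, under assumed names: pageSize, maxTransactionSize, tmpFile and getTempFile() stand in for the real members, and the marshalling and PageWrite bookkeeping of the real Transaction.write() is omitted.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.TreeMap;

// Sketch of the threshold logic only; not the real KahaDB Transaction.write().
class SpillSketch {
    final int pageSize = 4 * 1024;                      // assumed page size
    final long maxTransactionSize = 10L * 1024 * 1024;  // assumed 10 MB cap
    final TreeMap<Long, byte[]> writes = new TreeMap<Long, byte[]>();
    RandomAccessFile tmpFile;
    long size;

    void write(long pageId, byte[] data) throws IOException {
        writes.put(pageId, data);
        // AMQ-3374: size reflects the distinct pages held in memory,
        // not the cumulative bytes pushed through write()
        size = (long) writes.size() * pageSize;
        if (size > maxTransactionSize && tmpFile == null) {
            // once the in-memory footprint exceeds the cap, open a temp file
            // so further page data can be spilled off the heap
            tmpFile = new RandomAccessFile(getTempFile(), "rw");
        }
    }

    File getTempFile() throws IOException {
        // stand-in for the real temp-file location
        return File.createTempFile("tx-spill-sketch", ".tmp");
    }
}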