[HHH-4520] (Infinispan second level cache integration can cache stale collection data) Ported fix. testManyUsers has been disabled while ISPN-277 gets fixed. Finally, Infinispan version has been upgraded to 4.0.0.CR2.

git-svn-id: https://svn.jboss.org/repos/hibernate/core/trunk@18005 1b8cb986-b30d-0410-93ca-fae66ebed9b2
This commit is contained in:
Galder Zamarreno 2009-11-18 16:56:13 +00:00
parent 1658d1f58b
commit abc165eaba
36 changed files with 2100 additions and 345 deletions

View File

@ -17,7 +17,7 @@
<description>Integration of Hibernate with Infinispan</description>
<properties>
<version.infinispan>4.0.0-SNAPSHOT</version.infinispan>
<version.infinispan>4.0.0.CR2</version.infinispan>
<version.hsqldb>1.8.0.2</version.hsqldb>
<version.cglib>2.2</version.cglib>
<version.javassist>3.4.GA</version.javassist>

View File

@ -32,8 +32,8 @@ import org.infinispan.Cache;
import org.infinispan.config.Configuration;
import org.infinispan.manager.CacheManager;
import org.infinispan.manager.DefaultCacheManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A {@link RegionFactory} for <a href="http://www.jboss.org/infinispan">Infinispan</a>-backed cache
@ -44,13 +44,13 @@ import org.slf4j.LoggerFactory;
* @since 3.5
*/
public class InfinispanRegionFactory implements RegionFactory {
private static final Logger log = LoggerFactory.getLogger(InfinispanRegionFactory.class);
private static final Log log = LogFactory.getLog(InfinispanRegionFactory.class);
private static final String PREFIX = "hibernate.cache.infinispan.";
private static final String CONFIG_SUFFIX = ".cfg";
private static final String STRATEGY_SUFFIX = ".eviction.strategy";
private static final String WAKE_UP_INTERVAL_SUFFIX = ".eviction.wake_up_interval";

View File

@ -32,8 +32,8 @@ import org.hibernate.cache.RegionFactory;
import org.hibernate.util.NamingHelper;
import org.hibernate.util.PropertiesHelper;
import org.infinispan.manager.CacheManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A {@link RegionFactory} for <a href="http://www.jboss.org/infinispan">Infinispan</a>-backed cache
@ -44,7 +44,7 @@ import org.slf4j.LoggerFactory;
*/
public class JndiInfinispanRegionFactory extends InfinispanRegionFactory {
private static final Logger log = LoggerFactory.getLogger(JndiInfinispanRegionFactory.class);
private static final Log log = LogFactory.getLog(JndiInfinispanRegionFactory.class);
/**
* Specifies the JNDI name under which the {@link CacheManager} to use is bound.

View File

@ -0,0 +1,478 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* Copyright (c) 2009, Red Hat, Inc or third-party contributors as
* indicated by the @author tags or express copyright attribution
* statements applied by the authors.  All third-party contributions are
* distributed under license by Red Hat Middleware LLC.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, write to:
* Free Software Foundation, Inc.
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301 USA
*/
package org.hibernate.cache.infinispan.access;
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import org.hibernate.cache.CacheException;
/**
* Encapsulates logic to allow a {@link TransactionalAccessDelegate} to determine
* whether a {@link TransactionalAccessDelegate#putFromLoad(Object, Object, long, Object, boolean)}
* call should be allowed to update the cache. A <code>putFromLoad</code> has
* the potential to store stale data, since the data may have been removed from the
* database and the cache between the time when the data was read from the database
* and the actual call to <code>putFromLoad</code>.
*
* @author Brian Stansberry
*
* @version $Revision: $
*/
public class PutFromLoadValidator {

   /**
    * Period (in ms) after a removal during which a call to {@link #isPutValid(Object)} that
    * hasn't been {@link #registerPendingPut(Object) pre-registered} (aka a "naked put") will
    * return false.
    */
   public static final long NAKED_PUT_INVALIDATION_PERIOD = 10 * 1000;

   /** Period (in ms) after which a pending put is placed in the over-age queue. */
   private static final long PENDING_PUT_OVERAGE_PERIOD = 5 * 1000;

   /** Period (in ms) before which we stop trying to clean out pending puts. */
   private static final long PENDING_PUT_RECENT_PERIOD = 2 * 1000;

   /**
    * Period (in ms) after which a pending put is never expected to come in and should be
    * cleaned.
    */
   private static final long MAX_PENDING_PUT_DELAY = 2 * 60 * 1000;

   /**
    * Used to determine whether the owner of a pending put is a thread or a transaction.
    */
   private final TransactionManager transactionManager;

   // Effective timeouts; equal to the constants above in production, overridable by tests
   // via the protected constructor.
   private final long nakedPutInvalidationPeriod;
   private final long pendingPutOveragePeriod;
   private final long pendingPutRecentPeriod;
   private final long maxPendingPutDelay;

   /**
    * Registry of expected, future, isPutValid calls. If a key+owner is registered in this map,
    * it is not a "naked put" and is allowed to proceed.
    */
   private final ConcurrentMap<Object, PendingPutMap> pendingPuts = new ConcurrentHashMap<Object, PendingPutMap>();

   /**
    * List of pending puts. Used to ensure we don't leak memory via the pendingPuts map.
    */
   private final List<WeakReference<PendingPut>> pendingQueue = new LinkedList<WeakReference<PendingPut>>();

   /**
    * Separate list of pending puts that haven't been resolved within PENDING_PUT_OVERAGE_PERIOD.
    * Used to ensure we don't leak memory via the pendingPuts map. Tracked separately from more
    * recent pending puts for efficiency reasons.
    */
   private final List<WeakReference<PendingPut>> overagePendingQueue = new LinkedList<WeakReference<PendingPut>>();

   /** Lock controlling access to the pending put queues. */
   private final Lock pendingLock = new ReentrantLock();

   /** Map of key -> timestamp after which a naked put for that key is again allowed. */
   private final ConcurrentMap<Object, Long> recentRemovals = new ConcurrentHashMap<Object, Long>();

   /**
    * List of recent removals. Used to ensure we don't leak memory via the recentRemovals map.
    */
   private final List<RecentRemoval> removalsQueue = new LinkedList<RecentRemoval>();

   /**
    * The time when the first element in removalsQueue will expire. No reason to do housekeeping
    * on the queue before this time.
    */
   private volatile long earliestRemovalTimestamp;

   /** Lock controlling access to removalsQueue. */
   private final Lock removalsLock = new ReentrantLock();

   /**
    * The time of the last call to {@link #cleared()}, plus NAKED_PUT_INVALIDATION_PERIOD. All
    * naked puts will be rejected until the current time is greater than this value.
    */
   private volatile long invalidationTimestamp;

   /**
    * Creates a new PutFromLoadValidator.
    *
    * @param transactionManager
    *           transaction manager to use to associate changes with a transaction; may be
    *           <code>null</code>
    */
   public PutFromLoadValidator(TransactionManager transactionManager) {
      this(transactionManager, NAKED_PUT_INVALIDATION_PERIOD, PENDING_PUT_OVERAGE_PERIOD,
            PENDING_PUT_RECENT_PERIOD, MAX_PENDING_PUT_DELAY);
   }

   /**
    * Constructor variant for use by unit tests; allows control of various timeouts by the test.
    */
   protected PutFromLoadValidator(TransactionManager transactionManager,
         long nakedPutInvalidationPeriod, long pendingPutOveragePeriod,
         long pendingPutRecentPeriod, long maxPendingPutDelay) {
      this.transactionManager = transactionManager;
      this.nakedPutInvalidationPeriod = nakedPutInvalidationPeriod;
      this.pendingPutOveragePeriod = pendingPutOveragePeriod;
      this.pendingPutRecentPeriod = pendingPutRecentPeriod;
      this.maxPendingPutDelay = maxPendingPutDelay;
   }

   // ----------------------------------------------------------------- Public

   /**
    * Determines whether a cache put by the current owner (thread or transaction) is allowed.
    * A put is valid if it was {@link #registerPendingPut(Object) pre-registered} by the same
    * owner; failing that, a "naked" put is only valid if neither a region-wide clear nor a
    * removal of this key has occurred recently enough for the data to be potentially stale.
    *
    * @param key the cache key about to be written
    * @return <code>true</code> if the put may proceed; <code>false</code> if it must be skipped
    */
   public boolean isPutValid(Object key) {
      boolean valid = false;
      long now = System.currentTimeMillis();

      // First see whether this owner pre-registered the put; if so it is not naked.
      PendingPutMap pending = pendingPuts.get(key);
      if (pending != null) {
         synchronized (pending) {
            PendingPut toCancel = pending.remove(getOwnerForPut());
            valid = toCancel != null;
            if (valid) {
               toCancel.completed = true;
               if (pending.size() == 0) {
                  pendingPuts.remove(key);
               }
            }
         }
      }

      if (!valid) {
         // Naked put: allow only once the region-wide invalidation window has passed
         // and any per-key removal window has also expired.
         if (now > invalidationTimestamp) {
            Long removedTime = recentRemovals.get(key);
            if (removedTime == null || now > removedTime.longValue()) {
               valid = true;
            }
         }
      }

      // Piggy-back queue housekeeping on this call to bound memory use.
      cleanOutdatedPendingPuts(now, true);

      return valid;
   }

   /**
    * Notifies this validator that <code>key</code> has been removed from the cache/database.
    * Any pending puts for the key are invalidated, and naked puts for the key are rejected for
    * the configured invalidation period.
    *
    * @param key the removed key
    */
   public void keyRemoved(Object key) {
      // Invalidate any pending puts
      pendingPuts.remove(key);

      // Record when this occurred to invalidate later naked puts
      RecentRemoval removal = new RecentRemoval(key, this.nakedPutInvalidationPeriod);
      recentRemovals.put(key, removal.timestamp);

      // Don't let recentRemovals map become a memory leak
      RecentRemoval toClean = null;
      boolean attemptClean = removal.timestamp.longValue() > earliestRemovalTimestamp;
      removalsLock.lock();
      try {
         removalsQueue.add(removal);

         if (attemptClean) {
            if (removalsQueue.size() > 1) { // we have at least one as we just added it
               toClean = removalsQueue.remove(0);
            }
            earliestRemovalTimestamp = removalsQueue.get(0).timestamp.longValue();
         }
      } finally {
         removalsLock.unlock();
      }

      if (toClean != null) {
         Long cleaned = recentRemovals.get(toClean.key);
         if (cleaned != null && cleaned.equals(toClean.timestamp)) {
            cleaned = recentRemovals.remove(toClean.key);
            if (cleaned != null && cleaned.equals(toClean.timestamp) == false) {
               // Oops; removed the wrong timestamp; restore it
               recentRemovals.putIfAbsent(toClean.key, cleaned);
            }
         }
      }
   }

   /**
    * Notifies this validator that the entire region has been cleared. All tracking state is
    * reset, and all naked puts are rejected until the invalidation period has elapsed.
    */
   public void cleared() {
      // Set the timestamp before clearing state so concurrent isPutValid calls see it.
      invalidationTimestamp = System.currentTimeMillis() + this.nakedPutInvalidationPeriod;
      pendingLock.lock();
      try {
         removalsLock.lock();
         try {
            pendingPuts.clear();
            pendingQueue.clear();
            overagePendingQueue.clear();
            recentRemovals.clear();
            removalsQueue.clear();
            earliestRemovalTimestamp = invalidationTimestamp;
         } finally {
            removalsLock.unlock();
         }
      } finally {
         pendingLock.unlock();
      }
   }

   /**
    * Notifies this validator that it is expected that a database read followed by a subsequent
    * {@link #isPutValid(Object)} call will occur. The intent is this method would be called
    * following a cache miss wherein it is expected that a database read plus cache put will
    * occur. Calling this method allows the validator to treat the subsequent
    * <code>isPutValid</code> as if the database read occurred when this method was invoked.
    * This allows the validator to compare the timestamp of this call against the timestamp of
    * subsequent removal notifications. A put that occurs without this call preceding it is
    * "naked"; i.e. the validator must assume the put is not valid if any relevant removal has
    * occurred within {@link #NAKED_PUT_INVALIDATION_PERIOD} milliseconds.
    *
    * @param key
    *           key that will be used for subsequent put
    */
   public void registerPendingPut(Object key) {
      PendingPut pendingPut = new PendingPut(key, getOwnerForPut());
      PendingPutMap pendingForKey = new PendingPutMap();

      synchronized (pendingForKey) {
         for (;;) {
            PendingPutMap existing = pendingPuts.putIfAbsent(key, pendingForKey);
            if (existing != null && existing != pendingForKey) {
               synchronized (existing) {
                  existing.put(pendingPut);
                  PendingPutMap doublecheck = pendingPuts.putIfAbsent(key, existing);
                  if (doublecheck == null || doublecheck == existing) {
                     break;
                  }
                  // else we hit a race and need to loop to try again
               }
            } else {
               pendingForKey.put(pendingPut);
               break;
            }
         }
      }

      // Guard against memory leaks
      preventOutdatedPendingPuts(pendingPut);
   }

   // -------------------------------------------------------------- Protected

   /** Only for use by unit tests; may be removed at any time. */
   protected int getPendingPutQueueLength() {
      pendingLock.lock();
      try {
         return pendingQueue.size();
      } finally {
         pendingLock.unlock();
      }
   }

   /** Only for use by unit tests; may be removed at any time. */
   protected int getOveragePendingPutQueueLength() {
      pendingLock.lock();
      try {
         return overagePendingQueue.size();
      } finally {
         pendingLock.unlock();
      }
   }

   /** Only for use by unit tests; may be removed at any time. */
   protected int getRemovalQueueLength() {
      removalsLock.lock();
      try {
         return removalsQueue.size();
      } finally {
         removalsLock.unlock();
      }
   }

   // ---------------------------------------------------------------- Private

   /**
    * Returns the "owner" of a put: the current transaction if one is active, otherwise the
    * current thread.
    *
    * @throws CacheException if the transaction cannot be obtained from the transaction manager
    */
   private Object getOwnerForPut() {
      Transaction tx = null;
      try {
         if (transactionManager != null) {
            tx = transactionManager.getTransaction();
         }
      } catch (SystemException se) {
         throw new CacheException("Could not obtain transaction", se);
      }
      return tx == null ? Thread.currentThread() : tx;
   }

   /**
    * Enqueues the given pending put for later housekeeping and runs an immediate cleanup pass
    * while already holding {@link #pendingLock}.
    */
   private void preventOutdatedPendingPuts(PendingPut pendingPut) {
      pendingLock.lock();
      try {
         pendingQueue.add(new WeakReference<PendingPut>(pendingPut));
         cleanOutdatedPendingPuts(pendingPut.timestamp, false);
      } finally {
         pendingLock.unlock();
      }
   }

   /**
    * Performs housekeeping on the pending put queues: discards completed or garbage-collected
    * entries, moves over-aged entries to the over-age queue, and fully cleans out at most one
    * entry whose put never arrived within {@link #maxPendingPutDelay}.
    *
    * @param now
    *           current time in ms
    * @param lock
    *           whether this method needs to acquire {@link #pendingLock} itself; pass
    *           <code>false</code> only if the caller already holds it
    */
   private void cleanOutdatedPendingPuts(long now, boolean lock) {
      PendingPut toClean = null;
      if (lock) {
         pendingLock.lock();
      }
      try {
         // Clean items out of the basic queue
         long overaged = now - this.pendingPutOveragePeriod;
         long recent = now - this.pendingPutRecentPeriod;

         int pos = 0;
         while (pendingQueue.size() > pos) {
            WeakReference<PendingPut> ref = pendingQueue.get(pos);
            PendingPut item = ref.get();
            if (item == null || item.completed) {
               pendingQueue.remove(pos);
            } else if (item.timestamp < overaged) {
               // Potential leak; move to the overaged queue
               pendingQueue.remove(pos);
               overagePendingQueue.add(ref);
            } else if (item.timestamp >= recent) {
               // Don't waste time on very recent items
               break;
            } else if (pos > 2) {
               // Don't spend too much time getting nowhere
               break;
            } else {
               // Move on to the next item
               pos++;
            }
         }

         // Process the overage queue until we find an item to clean
         // or an incomplete item that hasn't aged out
         long mustCleanTime = now - this.maxPendingPutDelay;

         while (overagePendingQueue.size() > 0) {
            WeakReference<PendingPut> ref = overagePendingQueue.get(0);
            PendingPut item = ref.get();
            if (item == null || item.completed) {
               overagePendingQueue.remove(0);
            } else {
               if (item.timestamp < mustCleanTime) {
                  overagePendingQueue.remove(0);
                  toClean = item;
               }
               break;
            }
         }
      } finally {
         if (lock) {
            pendingLock.unlock();
         }
      }

      // We've found a pendingPut that never happened; clean it up
      if (toClean != null) {
         PendingPutMap map = pendingPuts.get(toClean.key);
         if (map != null) {
            synchronized (map) {
               PendingPut cleaned = map.remove(toClean.owner);
               // Null check guards against an NPE: if no put was registered for this owner,
               // the removal is a no-op and there is nothing to restore. (Without the check,
               // map.put(null) would dereference pendingPut.owner.)
               if (cleaned != null && toClean.equals(cleaned) == false) {
                  // Oops. We removed someone else's put; restore it.
                  map.put(cleaned);
               } else if (map.size() == 0) {
                  pendingPuts.remove(toClean.key);
               }
            }
         }
      }
   }

   /**
    * Lazy-initialization map for PendingPut. Optimized for the expected usual case where only a
    * single put is pending for a given key.
    *
    * This class is NOT THREAD SAFE. All operations on it must be performed with the object
    * monitor held.
    */
   private static class PendingPutMap {
      // Invariant: at most one of these is non-null. Once fullMap is created it stays in use.
      private PendingPut singlePendingPut;
      private Map<Object, PendingPut> fullMap;

      public void put(PendingPut pendingPut) {
         if (singlePendingPut == null) {
            if (fullMap == null) {
               // initial put
               singlePendingPut = pendingPut;
            } else {
               fullMap.put(pendingPut.owner, pendingPut);
            }
         } else {
            // 2nd put; need a map
            fullMap = new HashMap<Object, PendingPut>(4);
            fullMap.put(singlePendingPut.owner, singlePendingPut);
            singlePendingPut = null;
            fullMap.put(pendingPut.owner, pendingPut);
         }
      }

      public PendingPut remove(Object ownerForPut) {
         PendingPut removed = null;
         if (fullMap == null) {
            if (singlePendingPut != null && singlePendingPut.owner.equals(ownerForPut)) {
               removed = singlePendingPut;
               singlePendingPut = null;
            }
         } else {
            removed = fullMap.remove(ownerForPut);
         }
         return removed;
      }

      public int size() {
         return fullMap == null ? (singlePendingPut == null ? 0 : 1) : fullMap.size();
      }
   }

   /**
    * Record of an expected put: the key, the owner (thread or transaction) that registered it,
    * and when it was registered. Identity comparison is intentional; equals is not overridden.
    */
   private static class PendingPut {
      private final Object key;
      private final Object owner;
      private final long timestamp = System.currentTimeMillis();
      // Set once the matching isPutValid call arrives; checked during housekeeping.
      private volatile boolean completed;

      private PendingPut(Object key, Object owner) {
         this.key = key;
         this.owner = owner;
      }
   }

   /**
    * Record of a key removal: the key and the time until which naked puts for it are rejected.
    */
   private static class RecentRemoval {
      private final Object key;
      private final Long timestamp;

      private RecentRemoval(Object key, long nakedPutInvalidationPeriod) {
         this.key = key;
         timestamp = Long.valueOf(System.currentTimeMillis() + nakedPutInvalidationPeriod);
      }
   }
}

View File

@ -32,6 +32,8 @@ import org.hibernate.cache.access.SoftLock;
import org.hibernate.cache.infinispan.impl.BaseRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheHelper;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Defines the strategy for transactional access to entity or collection data in an Infinispan instance.
@ -44,31 +46,50 @@ import org.hibernate.cache.infinispan.util.CacheHelper;
* @since 3.5
*/
public class TransactionalAccessDelegate {
private static final Log log = LogFactory.getLog(TransactionalAccessDelegate.class);
protected final CacheAdapter cacheAdapter;
protected final BaseRegion region;
protected final PutFromLoadValidator putValidator;
public TransactionalAccessDelegate(BaseRegion region) {
public TransactionalAccessDelegate(BaseRegion region, PutFromLoadValidator validator) {
this.region = region;
this.cacheAdapter = region.getCacheAdapter();
this.putValidator = validator;
}
public Object get(Object key, long txTimestamp) throws CacheException {
if (!region.checkValid())
return null;
return cacheAdapter.get(key);
Object val = cacheAdapter.get(key);
if (val == null)
putValidator.registerPendingPut(key);
return val;
}
public boolean putFromLoad(Object key, Object value, long txTimestamp, Object version) throws CacheException {
if (!region.checkValid())
if (!region.checkValid()) {
return false;
}
if (!putValidator.isPutValid(key)) {
return false;
}
cacheAdapter.putForExternalRead(key, value);
return true;
}
public boolean putFromLoad(Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
throws CacheException {
return putFromLoad(key, value, txTimestamp, version);
boolean trace = log.isTraceEnabled();
if (!region.checkValid()) {
if (trace) log.trace("Region not valid");
return false;
}
if (!putValidator.isPutValid(key)) {
if (trace) log.trace("Put {0} not valid", key);
return false;
}
cacheAdapter.putForExternalRead(key, value);
return true;
}
public SoftLock lockItem(Object key, Object version) throws CacheException {
@ -113,18 +134,22 @@ public class TransactionalAccessDelegate {
// We update whether or not the region is valid. Other nodes
// may have already restored the region so they need to
// be informed of the change.
putValidator.keyRemoved(key);
cacheAdapter.remove(key);
}
public void removeAll() throws CacheException {
putValidator.cleared();
cacheAdapter.clear();
}
public void evict(Object key) throws CacheException {
putValidator.keyRemoved(key);
cacheAdapter.remove(key);
}
public void evictAll() throws CacheException {
putValidator.cleared();
Transaction tx = region.suspend();
try {
CacheHelper.sendEvictAllNotification(cacheAdapter, region.getAddress());

View File

@ -7,6 +7,7 @@ import org.hibernate.cache.CacheException;
import org.hibernate.cache.CollectionRegion;
import org.hibernate.cache.access.AccessType;
import org.hibernate.cache.access.CollectionRegionAccessStrategy;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.infinispan.notifications.Listener;
@ -32,4 +33,7 @@ public class CollectionRegionImpl extends BaseTransactionalDataRegion implements
throw new CacheException("Unsupported access type [" + accessType.getName() + "]");
}
public PutFromLoadValidator getPutFromLoadValidator() {
return new PutFromLoadValidator(transactionManager);
}
}

View File

@ -2,8 +2,8 @@ package org.hibernate.cache.infinispan.collection;
import org.hibernate.cache.CacheException;
import org.hibernate.cache.access.SoftLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* This defines the strategy for transactional access to collection data in a
@ -17,7 +17,7 @@ import org.slf4j.LoggerFactory;
* @since 3.5
*/
class ReadOnlyAccess extends TransactionalAccess {
private static final Logger log = LoggerFactory.getLogger(ReadOnlyAccess.class);
private static final Log log = LogFactory.getLog(ReadOnlyAccess.class);
ReadOnlyAccess(CollectionRegionImpl region) {
super(region);

View File

@ -21,7 +21,7 @@ class TransactionalAccess implements CollectionRegionAccessStrategy {
TransactionalAccess(CollectionRegionImpl region) {
this.region = region;
this.delegate = new TransactionalAccessDelegate(region);
this.delegate = new TransactionalAccessDelegate(region, region.getPutFromLoadValidator());
}
public void evict(Object key) throws CacheException {

View File

@ -7,6 +7,7 @@ import org.hibernate.cache.CacheException;
import org.hibernate.cache.EntityRegion;
import org.hibernate.cache.access.AccessType;
import org.hibernate.cache.access.EntityRegionAccessStrategy;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.infinispan.notifications.Listener;
@ -32,4 +33,7 @@ public class EntityRegionImpl extends BaseTransactionalDataRegion implements Ent
throw new CacheException("Unsupported access type [" + accessType.getName() + "]");
}
public PutFromLoadValidator getPutFromLoadValidator() {
return new PutFromLoadValidator(transactionManager);
}
}

View File

@ -2,8 +2,8 @@ package org.hibernate.cache.infinispan.entity;
import org.hibernate.cache.CacheException;
import org.hibernate.cache.access.SoftLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A specialization of {@link TransactionalAccess} that ensures we never update data. Infinispan
@ -14,7 +14,7 @@ import org.slf4j.LoggerFactory;
* @since 3.5
*/
class ReadOnlyAccess extends TransactionalAccess {
private static final Logger log = LoggerFactory.getLogger(ReadOnlyAccess.class);
private static final Log log = LogFactory.getLog(ReadOnlyAccess.class);
ReadOnlyAccess(EntityRegionImpl region) {
super(region);

View File

@ -21,7 +21,7 @@ class TransactionalAccess implements EntityRegionAccessStrategy {
TransactionalAccess(EntityRegionImpl region) {
this.region = region;
this.delegate = new TransactionalAccessDelegate(region);
this.delegate = new TransactionalAccessDelegate(region, region.getPutFromLoadValidator());
}
public void evict(Object key) throws CacheException {

View File

@ -1,6 +1,7 @@
package org.hibernate.cache.infinispan.impl;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -130,11 +131,13 @@ public abstract class BaseRegion implements Region {
public Map toMap() {
if (checkValid()) {
Map map = cacheAdapter.toMap();
Set keys = map.keySet();
for (Object key : keys) {
if (CacheHelper.isEvictAllNotification(key)) {
map.remove(key);
// If copying causes issues, provide a lazily loaded Map
Map map = new HashMap();
Set<Map.Entry> entries = cacheAdapter.toMap().entrySet();
for (Map.Entry entry : entries) {
Object key = entry.getKey();
if (!CacheHelper.isEvictAllNotification(key)) {
map.put(key, entry.getValue());
}
}
return map;
@ -149,7 +152,7 @@ public abstract class BaseRegion implements Region {
cacheAdapter.removeListener(this);
}
}
public boolean contains(Object key) {
if (!checkValid())
return false;
@ -209,7 +212,19 @@ public abstract class BaseRegion implements Region {
resume(tx);
}
}
public Object getOwnerForPut() {
Transaction tx = null;
try {
if (transactionManager != null) {
tx = transactionManager.getTransaction();
}
} catch (SystemException se) {
throw new CacheException("Could not obtain transaction", se);
}
return tx == null ? Thread.currentThread() : tx;
}
/**
* Tell the TransactionManager to suspend any ongoing transaction.
*

View File

@ -29,8 +29,8 @@ import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Helper for dealing with Infinispan cache instances.
@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory;
*/
public class CacheHelper {
private static final Logger log = LoggerFactory.getLogger(CacheHelper.class);
private static final Log log = LogFactory.getLog(CacheHelper.class);
/**
* Disallow external instantiation of CacheHelper.

View File

@ -0,0 +1,414 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* Copyright (c) 2009, Red Hat, Inc or third-party contributors as
* indicated by the @author tags or express copyright attribution
* statements applied by the authors.  All third-party contributions are
* distributed under license by Red Hat Middleware LLC.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, write to:
* Free Software Foundation, Inc.
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301 USA
*/
package org.hibernate.test.cache.infinispan.access;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeJtaTransactionManagerImpl;
import junit.framework.TestCase;
/**
* Tests of {@link PutFromLoadValidator}.
*
* @author Brian Stansberry
* @author Galder Zamarreño
*
* @version $Revision: $
*/
public class PutFromLoadValidatorUnitTestCase extends TestCase {
private Object KEY1 = "KEY1";
private TransactionManager tm;
public PutFromLoadValidatorUnitTestCase(String name) {
super(name);
}
@Override
protected void setUp() throws Exception {
super.setUp();
tm = DualNodeJtaTransactionManagerImpl.getInstance("test");
}
@Override
protected void tearDown() throws Exception {
try {
super.tearDown();
} finally {
tm = null;
try {
DualNodeJtaTransactionManagerImpl.cleanupTransactions();
} finally {
DualNodeJtaTransactionManagerImpl.cleanupTransactionManagers();
}
}
}
public void testNakedPut() throws Exception {
nakedPutTest(false);
}
public void testNakedPutTransactional() throws Exception {
nakedPutTest(true);
}
private void nakedPutTest(boolean transactional) throws Exception {
PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
if (transactional) {
tm.begin();
}
assertTrue(testee.isPutValid(KEY1));
}
public void testRegisteredPut() throws Exception {
registeredPutTest(false);
}
public void testRegisteredPutTransactional() throws Exception {
registeredPutTest(true);
}
private void registeredPutTest(boolean transactional) throws Exception {
PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
if (transactional) {
tm.begin();
}
testee.registerPendingPut(KEY1);
assertTrue(testee.isPutValid(KEY1));
}
public void testNakedPutAfterKeyRemoval() throws Exception {
nakedPutAfterRemovalTest(false, false);
}
public void testNakedPutAfterKeyRemovalTransactional() throws Exception {
nakedPutAfterRemovalTest(true, false);
}
public void testNakedPutAfterRegionRemoval() throws Exception {
nakedPutAfterRemovalTest(false, true);
}
public void testNakedPutAfterRegionRemovalTransactional() throws Exception {
nakedPutAfterRemovalTest(true, true);
}
private void nakedPutAfterRemovalTest(boolean transactional, boolean removeRegion)
throws Exception {
PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
if (removeRegion) {
testee.cleared();
} else {
testee.keyRemoved(KEY1);
}
if (transactional) {
tm.begin();
}
assertFalse(testee.isPutValid(KEY1));
}
public void testRegisteredPutAfterKeyRemoval() throws Exception {
registeredPutAfterRemovalTest(false, false);
}
public void testRegisteredPutAfterKeyRemovalTransactional() throws Exception {
registeredPutAfterRemovalTest(true, false);
}
public void testRegisteredPutAfterRegionRemoval() throws Exception {
registeredPutAfterRemovalTest(false, true);
}
public void testRegisteredPutAfterRegionRemovalTransactional() throws Exception {
registeredPutAfterRemovalTest(true, true);
}
private void registeredPutAfterRemovalTest(boolean transactional, boolean removeRegion)
throws Exception {
PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
if (removeRegion) {
testee.cleared();
} else {
testee.keyRemoved(KEY1);
}
if (transactional) {
tm.begin();
}
testee.registerPendingPut(KEY1);
assertTrue(testee.isPutValid(KEY1));
}
public void testRegisteredPutWithInterveningKeyRemoval() throws Exception {
registeredPutWithInterveningRemovalTest(false, false);
}
public void testRegisteredPutWithInterveningKeyRemovalTransactional() throws Exception {
registeredPutWithInterveningRemovalTest(true, false);
}
public void testRegisteredPutWithInterveningRegionRemoval() throws Exception {
registeredPutWithInterveningRemovalTest(false, true);
}
public void testRegisteredPutWithInterveningRegionRemovalTransactional() throws Exception {
registeredPutWithInterveningRemovalTest(true, true);
}
private void registeredPutWithInterveningRemovalTest(boolean transactional, boolean removeRegion)
throws Exception {
PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
if (transactional) {
tm.begin();
}
testee.registerPendingPut(KEY1);
if (removeRegion) {
testee.cleared();
} else {
testee.keyRemoved(KEY1);
}
assertFalse(testee.isPutValid(KEY1));
}
public void testDelayedNakedPutAfterKeyRemoval() throws Exception {
delayedNakedPutAfterRemovalTest(false, false);
}
public void testDelayedNakedPutAfterKeyRemovalTransactional() throws Exception {
delayedNakedPutAfterRemovalTest(true, false);
}
public void testDelayedNakedPutAfterRegionRemoval() throws Exception {
delayedNakedPutAfterRemovalTest(false, true);
}
public void testDelayedNakedPutAfterRegionRemovalTransactional() throws Exception {
delayedNakedPutAfterRemovalTest(true, true);
}
private void delayedNakedPutAfterRemovalTest(boolean transactional, boolean removeRegion)
throws Exception {
PutFromLoadValidator testee = new TestValidator(transactional ? tm : null, 100, 1000, 500,
10000);
if (removeRegion) {
testee.cleared();
} else {
testee.keyRemoved(KEY1);
}
if (transactional) {
tm.begin();
}
Thread.sleep(110);
assertTrue(testee.isPutValid(KEY1));
}
/** Three concurrent threads register pending puts for the same key; all must see a valid put. */
public void testMultipleRegistrations() throws Exception {
   multipleRegistrationtest(false);
}

/** Same as above, with each worker running inside its own JTA transaction. */
public void testMultipleRegistrationsTransactional() throws Exception {
   multipleRegistrationtest(true);
}
/**
 * Has three concurrent threads register a pending put for KEY1 and then check
 * isPutValid, after the region has been cleared. All three must succeed.
 * <p>
 * Fixes relative to the previous version: the worker counts down
 * {@code finishedLatch} in a finally block (so a failing thread cannot stall
 * the test for the full await timeout), the await result is asserted (so a
 * timeout fails with a clear message rather than a misleading count mismatch),
 * and the executor is shut down to avoid leaking threads into later tests.
 */
private void multipleRegistrationtest(final boolean transactional) throws Exception {
   final PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);

   final CountDownLatch registeredLatch = new CountDownLatch(3);
   final CountDownLatch finishedLatch = new CountDownLatch(3);
   final AtomicInteger success = new AtomicInteger();

   Runnable r = new Runnable() {
      public void run() {
         try {
            if (transactional) {
               tm.begin();
            }
            testee.registerPendingPut(KEY1);
            registeredLatch.countDown();
            // Wait until all three threads have registered before validating.
            registeredLatch.await(5, TimeUnit.SECONDS);
            if (testee.isPutValid(KEY1)) {
               success.incrementAndGet();
            }
         } catch (Exception e) {
            e.printStackTrace();
         } finally {
            // Always count down, even on failure, so the main thread
            // does not block for the full await timeout.
            finishedLatch.countDown();
         }
      }
   };

   ExecutorService executor = Executors.newFixedThreadPool(3);
   try {
      // Start with a removal so the "isPutValid" calls will fail if
      // any of the concurrent activity isn't handled properly
      testee.cleared();

      // Do the registration + isPutValid calls
      executor.execute(r);
      executor.execute(r);
      executor.execute(r);

      assertTrue("Threads completed in time", finishedLatch.await(5, TimeUnit.SECONDS));
      assertEquals("All threads succeeded", 3, success.get());
   } finally {
      // Don't leak the pool's threads into subsequent tests.
      executor.shutdownNow();
   }
}
/**
 * White box test for ensuring key removals get cleaned up.
 * <p>
 * The TestValidator uses a 200 ms naked-put invalidation period, so after
 * sleeping 210 ms the two queued removals are past expiration; the subsequent
 * keyRemoved calls presumably clean an expired entry as each new one is
 * queued, keeping the removal-queue length constant at 2.
 *
 * @throws Exception
 */
public void testRemovalCleanup() throws Exception {
   TestValidator testee = new TestValidator(null, 200, 1000, 500, 10000);
   testee.keyRemoved("KEY1");
   testee.keyRemoved("KEY2");
   Thread.sleep(210);
   assertEquals(2, testee.getRemovalQueueLength());
   testee.keyRemoved("KEY1");
   assertEquals(2, testee.getRemovalQueueLength());
   testee.keyRemoved("KEY2");
   assertEquals(2, testee.getRemovalQueueLength());
}
/**
 * Very much a white box test of the logic for ensuring pending put registrations get cleaned up.
 * <p>
 * The TestValidator is configured with a 600 ms pending-put overage period,
 * a 300 ms recent period and a 900 ms max pending-put delay. The inline
 * comments track the expected queue contents after each step; "(c)" marks a
 * registration whose put has completed via isPutValid.
 *
 * @throws Exception
 */
public void testPendingPutCleanup() throws Exception {
   TestValidator testee = new TestValidator(tm, 5000, 600, 300, 900);

   // Start with a regionRemoval so we can confirm at the end that all
   // registrations have been cleaned out
   testee.cleared();

   testee.registerPendingPut("1");
   testee.registerPendingPut("2");
   testee.registerPendingPut("3");
   testee.registerPendingPut("4");
   testee.registerPendingPut("5");
   testee.registerPendingPut("6");
   testee.isPutValid("6");
   testee.isPutValid("2");
   // ppq = [1,2(c),3,4,5,6(c)]
   assertEquals(6, testee.getPendingPutQueueLength());
   assertEquals(0, testee.getOveragePendingPutQueueLength());

   // Sleep past "pendingPutRecentPeriod"
   Thread.sleep(310);
   testee.registerPendingPut("7");
   // White box -- should have cleaned out 2 (completed) but
   // not gotten to 6 (also removed)
   // ppq = [1,3,4,5,6(c),7]
   assertEquals(0, testee.getOveragePendingPutQueueLength());
   assertEquals(6, testee.getPendingPutQueueLength());

   // Sleep past "pendingPutOveragePeriod"
   Thread.sleep(310);
   testee.registerPendingPut("8");
   // White box -- should have cleaned out 6 (completed) and
   // moved 1, 3, 4 and 5 to overage queue
   // oppq = [1,3,4,5] ppq = [7,8]
   assertEquals(4, testee.getOveragePendingPutQueueLength());
   assertEquals(2, testee.getPendingPutQueueLength());

   // Sleep past "maxPendingPutDelay"
   Thread.sleep(310);
   testee.isPutValid("3");
   // White box -- should have cleaned out 1 (overage) and
   // moved 7 to overage queue
   // oppq = [3(c),4,5,7] ppq=[8]
   assertEquals(4, testee.getOveragePendingPutQueueLength());
   assertEquals(1, testee.getPendingPutQueueLength());

   // Sleep past "maxPendingPutDelay"
   Thread.sleep(310);
   tm.begin();
   testee.registerPendingPut("7");
   // Suspend the tx so the tx-scoped registration for "7" stays pending.
   Transaction tx = tm.suspend();
   // White box -- should have cleaned out 3 (completed)
   // and 4 (overage) and moved 8 to overage queue
   // We now have 5,7,8 in overage and 7tx in pending
   // oppq = [5,7,8] ppq=[7tx]
   assertEquals(3, testee.getOveragePendingPutQueueLength());
   assertEquals(1, testee.getPendingPutQueueLength());

   // Validate that only expected items can do puts, thus indirectly
   // proving the others have been cleaned out of pendingPuts map
   assertFalse(testee.isPutValid("1"));
   // 5 was overage, so should have been cleaned
   assertEquals(2, testee.getOveragePendingPutQueueLength());
   assertFalse(testee.isPutValid("2"));
   // 7 was overage, so should have been cleaned
   assertEquals(1, testee.getOveragePendingPutQueueLength());
   assertFalse(testee.isPutValid("3"));
   assertFalse(testee.isPutValid("4"));
   assertFalse(testee.isPutValid("5"));
   assertFalse(testee.isPutValid("6"));
   assertFalse(testee.isPutValid("7"));
   assertTrue(testee.isPutValid("8"));
   // The suspended tx's registration of "7" is still valid once resumed.
   tm.resume(tx);
   assertTrue(testee.isPutValid("7"));
}
/**
 * Extension of PutFromLoadValidator that lets tests control the timing
 * parameters and exposes the internal queue lengths for white-box assertions.
 * The overrides below only delegate to the superclass; presumably they exist
 * to widen access for the tests in this file -- confirm against
 * PutFromLoadValidator's declared visibility. The previous auto-generated
 * "TODO" stubs comments were leftovers and have been removed.
 */
private static class TestValidator extends PutFromLoadValidator {

   protected TestValidator(TransactionManager transactionManager,
         long nakedPutInvalidationPeriod, long pendingPutOveragePeriod,
         long pendingPutRecentPeriod, long maxPendingPutDelay) {
      super(transactionManager, nakedPutInvalidationPeriod, pendingPutOveragePeriod,
            pendingPutRecentPeriod, maxPendingPutDelay);
   }

   @Override
   public int getOveragePendingPutQueueLength() {
      return super.getOveragePendingPutQueueLength();
   }

   @Override
   public int getPendingPutQueueLength() {
      return super.getPendingPutQueueLength();
   }

   @Override
   public int getRemovalQueueLength() {
      return super.getRemovalQueueLength();
   }
}
}

View File

@ -0,0 +1,96 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* Copyright (c) 2007, Red Hat Middleware LLC or third-party contributors as
* indicated by the @author tags or express copyright attribution
* statements applied by the authors.  All third-party contributions are
* distributed under license by Red Hat Middleware LLC.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, write to:
* Free Software Foundation, Inc.
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301 USA
*/
package org.hibernate.test.cache.infinispan.collection;
import java.util.Properties;
import org.hibernate.cache.CacheDataDescription;
import org.hibernate.cache.CacheException;
import org.hibernate.cache.CollectionRegion;
import org.hibernate.cache.Region;
import org.hibernate.cache.RegionFactory;
import org.hibernate.cache.access.AccessType;
import org.hibernate.cache.access.CollectionRegionAccessStrategy;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTestCase;
/**
 * Tests of CollectionRegionImpl.
 *
 * @author Galder Zamarreño
 */
public class CollectionRegionImplTestCase extends AbstractEntityCollectionRegionTestCase {

   public CollectionRegionImplTestCase(String name) {
      super(name);
   }

   /**
    * Collection regions support only the TRANSACTIONAL access type here:
    * READ_ONLY builds but fails on use with UnsupportedOperationException,
    * while the two read-write types are rejected with a CacheException.
    */
   @Override
   protected void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties) {
      CollectionRegion region = regionFactory.buildCollectionRegion("test", properties, null);
      assertNull("Got TRANSACTIONAL", region.buildAccessStrategy(AccessType.TRANSACTIONAL)
            .lockRegion());
      try {
         region.buildAccessStrategy(AccessType.READ_ONLY).lockRegion();
         fail("Did not get READ_ONLY");
      } catch (UnsupportedOperationException good) {
         // expected: READ_ONLY strategy does not support region locking
      }
      try {
         region.buildAccessStrategy(AccessType.NONSTRICT_READ_WRITE);
         fail("Incorrectly got NONSTRICT_READ_WRITE");
      } catch (CacheException good) {
         // expected
      }
      try {
         region.buildAccessStrategy(AccessType.READ_WRITE);
         fail("Incorrectly got READ_WRITE");
      } catch (CacheException good) {
         // expected
      }
   }

   @Override
   protected Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd) {
      return regionFactory.buildCollectionRegion(regionName, properties, cdd);
   }

   @Override
   protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
      // NOTE(review): this fetches DEF_ENTITY_RESOURCE even though this is the
      // collection-region test -- presumably collection regions share the
      // entity cache by default; confirm against InfinispanRegionFactory.
      return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE));
   }

   @Override
   protected void putInRegion(Region region, Object key, Object value) {
      CollectionRegionAccessStrategy strategy = ((CollectionRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL);
      strategy.putFromLoad(key, value, System.currentTimeMillis(), new Integer(1));
   }

   @Override
   protected void removeFromRegion(Region region, Object key) {
      ((CollectionRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).remove(key);
   }
}

View File

@ -434,9 +434,7 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
final String KEY = KEY_BASE + testCount++;
// Set up initial state
localAccessStrategy.get(KEY, System.currentTimeMillis());
localAccessStrategy.putFromLoad(KEY, VALUE1, System.currentTimeMillis(), new Integer(1));
remoteAccessStrategy.get(KEY, System.currentTimeMillis());
remoteAccessStrategy.putFromLoad(KEY, VALUE1, System.currentTimeMillis(), new Integer(1));
// Let the async put propagate

View File

@ -52,7 +52,6 @@ public abstract class AbstractTransactionalAccessTestCase extends AbstractEntity
final String KEY = KEY_BASE + testCount++;
localAccessStrategy.get(KEY, System.currentTimeMillis());
localAccessStrategy.putFromLoad(KEY, VALUE1, System.currentTimeMillis(), new Integer(1));
final CountDownLatch pferLatch = new CountDownLatch(1);

View File

@ -43,60 +43,53 @@ import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTestCas
* @since 3.5
*/
public class EntityRegionImplTestCase extends AbstractEntityCollectionRegionTestCase {
public EntityRegionImplTestCase(String name) {
super(name);
}
@Override
protected void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties) {
EntityRegion region = regionFactory.buildEntityRegion("test", properties, null);
assertNull("Got TRANSACTIONAL", region.buildAccessStrategy(AccessType.TRANSACTIONAL).lockRegion());
try
{
region.buildAccessStrategy(AccessType.READ_ONLY).lockRegion();
fail("Did not get READ_ONLY");
}
catch (UnsupportedOperationException good) {}
try
{
region.buildAccessStrategy(AccessType.NONSTRICT_READ_WRITE);
fail("Incorrectly got NONSTRICT_READ_WRITE");
}
catch (CacheException good) {}
try
{
region.buildAccessStrategy(AccessType.READ_WRITE);
fail("Incorrectly got READ_WRITE");
}
catch (CacheException good) {}
}
@Override
protected void putInRegion(Region region, Object key, Object value) {
((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).insert(key, value, new Integer(1));
}
@Override
protected void removeFromRegion(Region region, Object key) {
((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).remove(key);
}
public EntityRegionImplTestCase(String name) {
super(name);
}
@Override
protected Region createRegion(InfinispanRegionFactory regionFactory, String regionName,
Properties properties, CacheDataDescription cdd) {
protected void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties) {
EntityRegion region = regionFactory.buildEntityRegion("test", properties, null);
assertNull("Got TRANSACTIONAL", region.buildAccessStrategy(AccessType.TRANSACTIONAL)
.lockRegion());
try {
region.buildAccessStrategy(AccessType.READ_ONLY).lockRegion();
fail("Did not get READ_ONLY");
} catch (UnsupportedOperationException good) {
}
try {
region.buildAccessStrategy(AccessType.NONSTRICT_READ_WRITE);
fail("Incorrectly got NONSTRICT_READ_WRITE");
} catch (CacheException good) {
}
try {
region.buildAccessStrategy(AccessType.READ_WRITE);
fail("Incorrectly got READ_WRITE");
} catch (CacheException good) {
}
}
@Override
protected void putInRegion(Region region, Object key, Object value) {
((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).insert(key, value, new Integer(1));
}
@Override
protected void removeFromRegion(Region region, Object key) {
((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).remove(key);
}
@Override
protected Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd) {
return regionFactory.buildEntityRegion(regionName, properties, cdd);
}
@Override
protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache("entity"));
return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE));
}
}

View File

@ -1,38 +0,0 @@
package org.hibernate.test.cache.infinispan.functional;
import java.util.Map;
import org.hibernate.junit.functional.FunctionalTestCase;
import org.hibernate.stat.SecondLevelCacheStatistics;
import org.hibernate.stat.Statistics;
/**
 * Base class for the Infinispan functional tests: supplies the Item mapping
 * and lets each concrete subclass pick the cache concurrency strategy.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public abstract class AbstractFunctionalTestCase extends FunctionalTestCase {
   // Strategy name ("read-only", "transactional", ...) chosen by the subclass.
   private final String cacheConcurrencyStrategy;

   public AbstractFunctionalTestCase(String string, String cacheConcurrencyStrategy) {
      super(string);
      this.cacheConcurrencyStrategy = cacheConcurrencyStrategy;
   }

   public String[] getMappings() {
      return new String[] { "cache/infinispan/functional/Item.hbm.xml" };
   }

   @Override
   public String getCacheConcurrencyStrategy() {
      return cacheConcurrencyStrategy;
   }

   /** After evicting the Item entity region, the ".items" collection statistics report no entries. */
   public void testEmptySecondLevelCacheEntry() throws Exception {
      getSessions().getCache().evictEntityRegion(Item.class.getName());
      Statistics stats = getSessions().getStatistics();
      stats.clear();
      SecondLevelCacheStatistics statistics = stats.getSecondLevelCacheStatistics(Item.class.getName() + ".items");
      Map cacheEntries = statistics.getEntries();
      assertEquals(0, cacheEntries.size());
   }
}

View File

@ -4,10 +4,15 @@ package org.hibernate.test.cache.infinispan.functional;
* @author Galder Zamarreño
* @since 3.5
*/
public class BasicReadOnlyTestCase extends AbstractFunctionalTestCase {
public class BasicReadOnlyTestCase extends SingleNodeTestCase {
public BasicReadOnlyTestCase(String string) {
super(string, "read-only");
super(string);
}
@Override
public String getCacheConcurrencyStrategy() {
return "read-only";
}
}

View File

@ -6,106 +6,154 @@ import java.util.Map;
import org.hibernate.Session;
import org.hibernate.Transaction;
import org.hibernate.cache.entry.CacheEntry;
import org.hibernate.cfg.Configuration;
import org.hibernate.stat.SecondLevelCacheStatistics;
import org.hibernate.stat.Statistics;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @author Galder Zamarreño
* @since 3.5
*/
public class BasicTransactionalTestCase extends AbstractFunctionalTestCase {
public class BasicTransactionalTestCase extends SingleNodeTestCase {
private static final Log log = LogFactory.getLog(BasicTransactionalTestCase.class);
public BasicTransactionalTestCase(String string) {
super(string, "transactional");
super(string);
}
public void testEntityCache() {
@Override
public void configure(Configuration cfg) {
super.configure(cfg);
}
public void testEntityCache() throws Exception {
Item item = new Item("chris", "Chris's Item");
beginTx();
try {
Session s = openSession();
s.getTransaction().begin();
s.persist(item);
s.getTransaction().commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
Session s = openSession();
Statistics stats = s.getSessionFactory().getStatistics();
s.getTransaction().begin();
s.persist(item);
s.getTransaction().commit();
s.close();
s = openSession();
Item found = (Item) s.load(Item.class, item.getId());
System.out.println(stats);
assertEquals(item.getDescription(), found.getDescription());
assertEquals(0, stats.getSecondLevelCacheMissCount());
assertEquals(1, stats.getSecondLevelCacheHitCount());
s.delete(found);
s.close();
beginTx();
try {
Session s = openSession();
Item found = (Item) s.load(Item.class, item.getId());
Statistics stats = s.getSessionFactory().getStatistics();
log.info(stats.toString());
assertEquals(item.getDescription(), found.getDescription());
assertEquals(0, stats.getSecondLevelCacheMissCount());
assertEquals(1, stats.getSecondLevelCacheHitCount());
s.delete(found);
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
public void testCollectionCache() {
public void testCollectionCache() throws Exception {
Item item = new Item("chris", "Chris's Item");
Item another = new Item("another", "Owned Item");
item.addItem(another);
Session s = openSession();
s.getTransaction().begin();
s.persist(item);
s.persist(another);
s.getTransaction().commit();
s.close();
beginTx();
try {
Session s = openSession();
s.getTransaction().begin();
s.persist(item);
s.persist(another);
s.getTransaction().commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
s = openSession();
Statistics stats = s.getSessionFactory().getStatistics();
Item loaded = (Item) s.load(Item.class, item.getId());
assertEquals(1, loaded.getItems().size());
s.close();
beginTx();
try {
Session s = openSession();
Item loaded = (Item) s.load(Item.class, item.getId());
assertEquals(1, loaded.getItems().size());
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
s = openSession();
SecondLevelCacheStatistics cStats = stats.getSecondLevelCacheStatistics(Item.class.getName() + ".items");
Item loadedWithCachedCollection = (Item) s.load(Item.class, item.getId());
stats.logSummary();
assertEquals(item.getName(), loadedWithCachedCollection.getName());
assertEquals(item.getItems().size(), loadedWithCachedCollection.getItems().size());
assertEquals(1, cStats.getHitCount());
Map cacheEntries = cStats.getEntries();
assertEquals(1, cacheEntries.size());
s.close();
beginTx();
try {
Session s = openSession();
Statistics stats = s.getSessionFactory().getStatistics();
SecondLevelCacheStatistics cStats = stats.getSecondLevelCacheStatistics(Item.class.getName() + ".items");
Item loadedWithCachedCollection = (Item) s.load(Item.class, item.getId());
stats.logSummary();
assertEquals(item.getName(), loadedWithCachedCollection.getName());
assertEquals(item.getItems().size(), loadedWithCachedCollection.getItems().size());
assertEquals(1, cStats.getHitCount());
Map cacheEntries = cStats.getEntries();
assertEquals(1, cacheEntries.size());
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
public void testStaleWritesLeaveCacheConsistent() {
Session s = openSession();
Transaction txn = s.beginTransaction();
VersionedItem item = new VersionedItem();
item.setName("steve");
item.setDescription("steve's item");
s.save(item);
txn.commit();
s.close();
public void testStaleWritesLeaveCacheConsistent() throws Exception {
VersionedItem item = null;
Transaction txn = null;
Session s = null;
beginTx();
try {
s = openSession();
txn = s.beginTransaction();
item = new VersionedItem();
item.setName("steve");
item.setDescription("steve's item");
s.save(item);
txn.commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
Long initialVersion = item.getVersion();
// manually revert the version property
item.setVersion(new Long(item.getVersion().longValue() - 1));
beginTx();
try {
s = openSession();
txn = s.beginTransaction();
s.update(item);
txn.commit();
s.close();
fail("expected stale write to fail");
} catch (Throwable expected) {
// expected behavior here
if (txn != null) {
try {
txn.rollback();
} catch (Throwable ignore) {
}
}
s = openSession();
txn = s.beginTransaction();
s.update(item);
txn.commit();
fail("expected stale write to fail");
} catch (Exception e) {
setRollbackOnlyTxExpected(e);
} finally {
if (s != null && s.isOpen()) {
try {
s.close();
} catch (Throwable ignore) {
}
}
commitOrRollbackTx();
if (s != null && s.isOpen()) {
try {
s.close();
} catch (Throwable ignore) {
}
}
}
// check the version value in the cache...
@ -116,24 +164,42 @@ public class BasicTransactionalTestCase extends AbstractFunctionalTestCase {
cachedVersionValue = (Long) ((CacheEntry) entry).getVersion();
assertEquals(initialVersion.longValue(), cachedVersionValue.longValue());
// cleanup
s = openSession();
txn = s.beginTransaction();
item = (VersionedItem) s.load(VersionedItem.class, item.getId());
s.delete(item);
txn.commit();
s.close();
beginTx();
try {
// cleanup
s = openSession();
txn = s.beginTransaction();
item = (VersionedItem) s.load(VersionedItem.class, item.getId());
s.delete(item);
txn.commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
public void testQueryCacheInvalidation() {
Session s = openSession();
Transaction t = s.beginTransaction();
Item i = new Item();
i.setName("widget");
i.setDescription("A really top-quality, full-featured widget.");
s.persist(i);
t.commit();
s.close();
public void testQueryCacheInvalidation() throws Exception {
Session s = null;
Transaction t = null;
Item i = null;
beginTx();
try {
s = openSession();
t = s.beginTransaction();
i = new Item();
i.setName("widget");
i.setDescription("A really top-quality, full-featured widget.");
s.persist(i);
t.commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
SecondLevelCacheStatistics slcs = s.getSessionFactory().getStatistics().getSecondLevelCacheStatistics(Item.class.getName());
@ -141,17 +207,21 @@ public class BasicTransactionalTestCase extends AbstractFunctionalTestCase {
assertEquals(slcs.getElementCountInMemory(), 1);
assertEquals(slcs.getEntries().size(), 1);
s = openSession();
t = s.beginTransaction();
i = (Item) s.get(Item.class, i.getId());
assertEquals(slcs.getHitCount(), 1);
assertEquals(slcs.getMissCount(), 0);
i.setDescription("A bog standard item");
t.commit();
s.close();
beginTx();
try {
s = openSession();
t = s.beginTransaction();
i = (Item) s.get(Item.class, i.getId());
assertEquals(slcs.getHitCount(), 1);
assertEquals(slcs.getMissCount(), 0);
i.setDescription("A bog standard item");
t.commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
assertEquals(slcs.getPutCount(), 2);
@ -160,32 +230,61 @@ public class BasicTransactionalTestCase extends AbstractFunctionalTestCase {
assertTrue(ser[0].equals("widget"));
assertTrue(ser[1].equals("A bog standard item"));
// cleanup
s = openSession();
t = s.beginTransaction();
s.delete(i);
t.commit();
s.close();
beginTx();
try {
// cleanup
s = openSession();
t = s.beginTransaction();
s.delete(i);
t.commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
public void testQueryCache() {
public void testQueryCache() throws Exception {
Session s = null;
Item item = new Item("chris", "Chris's Item");
beginTx();
try {
s = openSession();
s.getTransaction().begin();
s.persist(item);
s.getTransaction().commit();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
Session s = openSession();
s.getTransaction().begin();
s.persist(item);
s.getTransaction().commit();
s.close();
beginTx();
try {
s = openSession();
s.createQuery("from Item").setCacheable(true).list();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
s = openSession();
s.createQuery("from Item").setCacheable(true).list();
s.close();
s = openSession();
Statistics stats = s.getSessionFactory().getStatistics();
s.createQuery("from Item").setCacheable(true).list();
assertEquals(1, stats.getQueryCacheHitCount());
s.createQuery("delete from Item").executeUpdate();
s.close();
beginTx();
try {
s = openSession();
Statistics stats = s.getSessionFactory().getStatistics();
s.createQuery("from Item").setCacheable(true).list();
assertEquals(1, stats.getQueryCacheHitCount());
s.createQuery("delete from Item").executeUpdate();
s.close();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
}

View File

@ -0,0 +1,513 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* Copyright (c) 2009, Red Hat Middleware LLC or third-party contributors as
* indicated by the @author tags or express copyright attribution
* statements applied by the authors.  All third-party contributions are
* distributed under license by Red Hat Middleware LLC.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the GNU
* Lesser General Public License, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
* for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this distribution; if not, write to:
* Free Software Foundation, Inc.
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301 USA
*/
package org.hibernate.test.cache.infinispan.functional;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import javax.transaction.TransactionManager;
import org.hibernate.FlushMode;
import org.hibernate.Session;
import org.hibernate.cache.RegionFactory;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.connection.ConnectionProvider;
import org.hibernate.stat.SecondLevelCacheStatistics;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeTestCase;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeConnectionProviderImpl;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeJtaTransactionManagerImpl;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeTransactionManagerLookup;
import org.hibernate.transaction.TransactionManagerLookup;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
*
* @author nikita_tovstoles@mba.berkeley.edu
* @author Galder Zamarreño
*/
public class ConcurrentWriteTest extends SingleNodeTestCase {
private static final Log log = LogFactory.getLog(ConcurrentWriteTest.class);
/**
* when USER_COUNT==1, tests pass, when >4 tests fail
*/
private static final int USER_COUNT = 5;
private static final int ITERATION_COUNT = 150;
private static final int THINK_TIME_MILLIS = 10;
private static final long LAUNCH_INTERVAL_MILLIS = 10;
private static final Random random = new Random();
/**
* kill switch used to stop all users when one fails
*/
private static volatile boolean TERMINATE_ALL_USERS = false;
/**
* collection of IDs of all customers participating in this test
*/
private Set<Integer> customerIDs = new HashSet<Integer>();
private TransactionManager tm;
public ConcurrentWriteTest(String x) {
super(x);
}
/** Uses the JTA transaction manager registered for the "local" node id. */
@Override
protected TransactionManager getTransactionManager() {
   return DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.LOCAL);
}

@Override
protected Class<? extends RegionFactory> getCacheRegionFactory() {
   return InfinispanRegionFactory.class;
}

/** Connection provider that cooperates with the dual-node test infrastructure. */
@Override
protected Class<? extends ConnectionProvider> getConnectionProviderClass() {
   return DualNodeConnectionProviderImpl.class;
}

@Override
protected Class<? extends TransactionManagerLookup> getTransactionManagerLookupClass() {
   return DualNodeTransactionManagerLookup.class;
}
/**
 * Sanity check that the DB can be queried inside a JTA transaction; a query
 * failure marks the transaction rollback-only via setRollbackOnlyTx.
 * (Previously commented-out fallback code has been removed.)
 *
 * @throws java.lang.Exception
 */
public void testPingDb() throws Exception {
   try {
      beginTx();
      getEnvironment().getSessionFactory().getCurrentSession().createQuery("from " + Customer.class.getName()).list();
   } catch (Exception e) {
      setRollbackOnlyTx(e);
   } finally {
      commitOrRollbackTx();
   }
}
@Override
protected void prepareTest() throws Exception {
   super.prepareTest();
   // Reset the kill switch before each test run.
   TERMINATE_ALL_USERS = false;
}

@Override
protected void cleanupTest() throws Exception {
   try {
      super.cleanupTest();
   } finally {
      // Always purge test data, even if the superclass cleanup threw.
      cleanup();
      // DualNodeJtaTransactionManagerImpl.cleanupTransactions();
      // DualNodeJtaTransactionManagerImpl.cleanupTransactionManagers();
   }
}

/** Tags this configuration with the "local" node id for the dual-node infrastructure. */
@Override
public void configure(Configuration cfg) {
   super.configure(cfg);
   cfg.setProperty(DualNodeTestCase.NODE_ID_PROP, DualNodeTestCase.LOCAL);
}

// Query cache is enabled for this test class.
@Override
protected boolean getUseQueryCache() {
   return true;
}
/**
 * Single-user flow: create a customer, verify entity and collection
 * second-level cache statistics, then add and remove a contact, checking
 * that the cached contacts collection is invalidated by the add (HHH-4520:
 * stale collection data must not be served).
 */
public void testSingleUser() throws Exception {
   // setup
   Customer customer = createCustomer(0);
   final Integer customerId = customer.getId();
   getCustomerIDs().add(customerId);

   assertNull("contact exists despite not being added", getFirstContact(customerId));

   // check that cache was hit
   SecondLevelCacheStatistics customerSlcs = getEnvironment().getSessionFactory()
         .getStatistics().getSecondLevelCacheStatistics(Customer.class.getName());
   assertEquals(customerSlcs.getPutCount(), 1);
   assertEquals(customerSlcs.getElementCountInMemory(), 1);
   assertEquals(customerSlcs.getEntries().size(), 1);

   SecondLevelCacheStatistics contactsCollectionSlcs = getEnvironment().getSessionFactory()
         .getStatistics().getSecondLevelCacheStatistics(Customer.class.getName() + ".contacts");
   assertEquals(1, contactsCollectionSlcs.getPutCount());
   assertEquals(1, contactsCollectionSlcs.getElementCountInMemory());
   assertEquals(1, contactsCollectionSlcs.getEntries().size());

   final Contact contact = addContact(customerId);
   assertNotNull("contact returned by addContact is null", contact);
   // Adding a contact must invalidate the cached collection (HHH-4520).
   assertEquals("Customer.contacts cache was not invalidated after addContact", 0,
         contactsCollectionSlcs.getElementCountInMemory());

   assertNotNull("Contact missing after successful add call", getFirstContact(customerId));

   // read everyone's contacts
   readEveryonesFirstContact();

   removeContact(customerId);
   assertNull("contact still exists after successful remove call", getFirstContact(customerId));
}
// /**
// * TODO: This will fail until ISPN-277 has been fixed.
// *
// * @throws Exception
// */
// public void testManyUsers() throws Throwable {
// try {
// // setup - create users
// for (int i = 0; i < USER_COUNT; i++) {
// Customer customer = createCustomer(0);
// getCustomerIDs().add(customer.getId());
// }
// assertEquals("failed to create enough Customers", USER_COUNT, getCustomerIDs().size());
//
// final ExecutorService executor = Executors.newFixedThreadPool(USER_COUNT);
//
// CyclicBarrier barrier = new CyclicBarrier(USER_COUNT + 1);
// List<Future<Void>> futures = new ArrayList<Future<Void>>(USER_COUNT);
// for (Integer customerId : getCustomerIDs()) {
// Future<Void> future = executor.submit(new UserRunner(customerId, barrier));
// futures.add(future);
// Thread.sleep(LAUNCH_INTERVAL_MILLIS); // rampup
// }
//// barrier.await(); // wait for all threads to be ready
// barrier.await(45, TimeUnit.SECONDS); // wait for all threads to finish
// log.info("All threads finished, let's shutdown the executor and check whether any exceptions were reported");
// for (Future<Void> future : futures) future.get();
// log.info("All future gets checked");
// } catch (Throwable t) {
// log.error("Error running test", t);
// throw t;
// }
// }
/**
 * Deletes all Contact and Customer rows in one JTA transaction and clears
 * the tracked customer IDs. Contacts are deleted first, presumably because
 * Contact rows reference Customer rows.
 */
public void cleanup() throws Exception {
   getCustomerIDs().clear();
   String deleteContactHQL = "delete from Contact";
   String deleteCustomerHQL = "delete from Customer";
   beginTx();
   try {
      Session session = getEnvironment().getSessionFactory().getCurrentSession();
      session.createQuery(deleteContactHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
      session.createQuery(deleteCustomerHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
   } catch (Exception e) {
      setRollbackOnlyTx(e);
   } finally {
      commitOrRollbackTx();
   }
}
private Customer createCustomer(int nameSuffix) throws Exception {
Customer customer = null;
beginTx();
try {
customer = new Customer();
customer.setName("customer_" + nameSuffix);
customer.setContacts(new HashSet<Contact>());
getEnvironment().getSessionFactory().getCurrentSession().persist(customer);
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
return customer;
}
/**
* read first contact of every Customer participating in this test. this forces concurrent cache
* writes of Customer.contacts Collection cache node
*
* @return who cares
* @throws java.lang.Exception
*/
private void readEveryonesFirstContact() throws Exception {
beginTx();
try {
for (Integer customerId : getCustomerIDs()) {
if (TERMINATE_ALL_USERS) {
setRollbackOnlyTx();
return;
}
Customer customer = (Customer) getEnvironment().getSessionFactory().getCurrentSession().load(Customer.class, customerId);
Set<Contact> contacts = customer.getContacts();
if (!contacts.isEmpty()) {
contacts.iterator().next();
}
}
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
/**
* -load existing Customer -get customer's contacts; return 1st one
*
* @param customerId
* @return first Contact or null if customer has none
*/
private Contact getFirstContact(Integer customerId) throws Exception {
assert customerId != null;
Contact firstContact = null;
beginTx();
try {
final Customer customer = (Customer) getEnvironment().getSessionFactory()
.getCurrentSession().load(Customer.class, customerId);
Set<Contact> contacts = customer.getContacts();
firstContact = contacts.isEmpty() ? null : contacts.iterator().next();
if (TERMINATE_ALL_USERS)
setRollbackOnlyTx();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
return firstContact;
}
/**
* -load existing Customer -create a new Contact and add to customer's contacts
*
* @param customerId
* @return added Contact
*/
private Contact addContact(Integer customerId) throws Exception {
assert customerId != null;
Contact contact = null;
beginTx();
try {
final Customer customer = (Customer) getEnvironment().getSessionFactory()
.getCurrentSession().load(Customer.class, customerId);
contact = new Contact();
contact.setName("contact name");
contact.setTlf("wtf is tlf?");
contact.setCustomer(customer);
customer.getContacts().add(contact);
// assuming contact is persisted via cascade from customer
if (TERMINATE_ALL_USERS)
setRollbackOnlyTx();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
return contact;
}
/**
* remove existing 'contact' from customer's list of contacts
*
* @param contact
* contact to remove from customer's contacts
* @param customerId
* @throws IllegalStateException
* if customer does not own a contact
*/
private void removeContact(Integer customerId) throws Exception {
assert customerId != null;
beginTx();
try {
Customer customer = (Customer) getEnvironment().getSessionFactory().getCurrentSession()
.load(Customer.class, customerId);
Set<Contact> contacts = customer.getContacts();
if (contacts.size() != 1) {
throw new IllegalStateException("can't remove contact: customer id=" + customerId
+ " expected exactly 1 contact, " + "actual count=" + contacts.size());
}
Contact contact = contacts.iterator().next();
contacts.remove(contact);
contact.setCustomer(null);
// explicitly delete Contact because hbm has no 'DELETE_ORPHAN' cascade?
// getEnvironment().getSessionFactory().getCurrentSession().delete(contact); //appears to
// not be needed
// assuming contact is persisted via cascade from customer
if (TERMINATE_ALL_USERS)
setRollbackOnlyTx();
} catch (Exception e) {
setRollbackOnlyTx(e);
} finally {
commitOrRollbackTx();
}
}
   /**
    * @return the set of ids of Customers created for this test run
    */
   public Set<Integer> getCustomerIDs() {
      return customerIDs;
   }
private String statusOfRunnersToString(Set<UserRunner> runners) {
assert runners != null;
StringBuilder sb = new StringBuilder("TEST CONFIG [userCount=" + USER_COUNT
+ ", iterationsPerUser=" + ITERATION_COUNT + ", thinkTimeMillis="
+ THINK_TIME_MILLIS + "] " + " STATE of UserRunners: ");
for (UserRunner r : runners) {
sb.append(r.toString() + System.getProperty("line.separator"));
}
return sb.toString();
}
class UserRunner implements Callable<Void> {
private final CyclicBarrier barrier;
final private Integer customerId;
private int completedIterations = 0;
private Throwable causeOfFailure;
public UserRunner(Integer cId, CyclicBarrier barrier) {
assert cId != null;
this.customerId = cId;
this.barrier = barrier;
}
private boolean contactExists() throws Exception {
return getFirstContact(customerId) != null;
}
public Void call() throws Exception {
// name this thread for easier log tracing
Thread.currentThread().setName("UserRunnerThread-" + getCustomerId());
log.info("Wait for all executions paths to be ready to perform calls");
try {
// barrier.await();
for (int i = 0; i < ITERATION_COUNT && !TERMINATE_ALL_USERS; i++) {
if (contactExists())
throw new IllegalStateException("contact already exists before add, customerId=" + customerId);
addContact(customerId);
thinkRandomTime();
if (!contactExists())
throw new IllegalStateException("contact missing after successful add, customerId=" + customerId);
thinkRandomTime();
// read everyone's contacts
readEveryonesFirstContact();
thinkRandomTime();
removeContact(customerId);
if (contactExists())
throw new IllegalStateException("contact still exists after successful remove call, customerId=" + customerId);
thinkRandomTime();
++completedIterations;
if (log.isTraceEnabled()) log.trace("Iteration completed {0}", completedIterations);
}
} catch (Throwable t) {
TERMINATE_ALL_USERS = true;
log.error("Error", t);
throw new Exception(t);
// rollback current transaction if any
// really should not happen since above methods all follow begin-commit-rollback pattern
// try {
// if
// (DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestUtil.LOCAL).getTransaction()
// != null) {
// DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestUtil.LOCAL).rollback();
// }
// } catch (SystemException ex) {
// throw new RuntimeException("failed to rollback tx", ex);
// }
} finally {
log.info("Wait for all execution paths to finish");
barrier.await();
}
return null;
}
public boolean isSuccess() {
return ITERATION_COUNT == getCompletedIterations();
}
public int getCompletedIterations() {
return completedIterations;
}
public Throwable getCauseOfFailure() {
return causeOfFailure;
}
public Integer getCustomerId() {
return customerId;
}
@Override
public String toString() {
return super.toString() + "[customerId=" + getCustomerId() + " iterationsCompleted="
+ getCompletedIterations() + " completedAll=" + isSuccess() + " causeOfFailure="
+ (this.causeOfFailure != null ? getStackTrace(causeOfFailure) : "") + "] ";
}
}
public static String getStackTrace(Throwable throwable) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw, true);
throwable.printStackTrace(pw);
return sw.getBuffer().toString();
}
/**
* sleep between 0 and THINK_TIME_MILLIS.
*
* @throws RuntimeException
* if sleep is interrupted or TERMINATE_ALL_USERS flag was set to true i n the
* meantime
*/
private void thinkRandomTime() {
try {
Thread.sleep(random.nextInt(THINK_TIME_MILLIS));
} catch (InterruptedException ex) {
throw new RuntimeException("sleep interrupted", ex);
}
if (TERMINATE_ALL_USERS) {
throw new RuntimeException("told to terminate (because a UserRunner had failed)");
}
}
}

View File

@ -0,0 +1,121 @@
package org.hibernate.test.cache.infinispan.functional;
import java.util.Map;
import javax.transaction.Status;
import javax.transaction.TransactionManager;
import org.hibernate.cache.RegionFactory;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.connection.ConnectionProvider;
import org.hibernate.junit.functional.FunctionalTestCase;
import org.hibernate.stat.SecondLevelCacheStatistics;
import org.hibernate.stat.Statistics;
import org.hibernate.transaction.CMTTransactionFactory;
import org.hibernate.transaction.TransactionFactory;
import org.hibernate.transaction.TransactionManagerLookup;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Base class for single-node Infinispan second level cache functional tests.
 * Wires Hibernate up with the Infinispan region factory, an XA connection
 * provider and a CMT transaction factory, and provides JTA
 * begin/commit/rollback helpers around the configured TransactionManager.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public abstract class SingleNodeTestCase extends FunctionalTestCase {
   private static final Log log = LogFactory.getLog(SingleNodeTestCase.class);
   private final TransactionManager tm;

   public SingleNodeTestCase(String string) {
      super(string);
      // NOTE(review): invokes an overridable method from the constructor;
      // safe only while overrides don't depend on subclass state
      tm = getTransactionManager();
   }

   /**
    * Looks up the transaction manager through the configured lookup class.
    *
    * @throws RuntimeException wrapping any lookup or instantiation failure
    */
   protected TransactionManager getTransactionManager() {
      try {
         return getTransactionManagerLookupClass().newInstance().getTransactionManager(null);
      } catch (Exception e) {
         log.error("Error", e);
         throw new RuntimeException(e);
      }
   }

   public String[] getMappings() {
      return new String[] { 
               "cache/infinispan/functional/Item.hbm.xml", 
               "cache/infinispan/functional/Customer.hbm.xml", 
               "cache/infinispan/functional/Contact.hbm.xml"};
   }

   @Override
   public String getCacheConcurrencyStrategy() {
      return "transactional";
   }

   protected Class<? extends RegionFactory> getCacheRegionFactory() {
      return InfinispanRegionFactory.class;
   }

   protected Class<? extends TransactionFactory> getTransactionFactoryClass() {
      return CMTTransactionFactory.class;
   }

   protected Class<? extends ConnectionProvider> getConnectionProviderClass() {
      return org.hibernate.test.cache.infinispan.tm.XaConnectionProvider.class;
   }

   protected Class<? extends TransactionManagerLookup> getTransactionManagerLookupClass() {
      return org.hibernate.test.cache.infinispan.tm.XaTransactionManagerLookup.class;
   }

   /** Subclasses may override to disable the query cache. */
   protected boolean getUseQueryCache() {
      return true;
   }

   public void configure(Configuration cfg) {
      super.configure(cfg);
      cfg.setProperty(Environment.USE_SECOND_LEVEL_CACHE, "true");
      cfg.setProperty(Environment.GENERATE_STATISTICS, "true");
      cfg.setProperty(Environment.USE_QUERY_CACHE, String.valueOf(getUseQueryCache()));
      cfg.setProperty(Environment.CACHE_REGION_FACTORY, getCacheRegionFactory().getName());
      cfg.setProperty(Environment.CONNECTION_PROVIDER, getConnectionProviderClass().getName());
      cfg.setProperty(Environment.TRANSACTION_MANAGER_STRATEGY, getTransactionManagerLookupClass().getName());
      cfg.setProperty(Environment.TRANSACTION_STRATEGY, getTransactionFactoryClass().getName());
   }

   public void testEmptySecondLevelCacheEntry() throws Exception {
      getSessions().getCache().evictEntityRegion(Item.class.getName());
      Statistics stats = getSessions().getStatistics();
      stats.clear();
      SecondLevelCacheStatistics statistics = stats.getSecondLevelCacheStatistics(Item.class.getName() + ".items");
      Map cacheEntries = statistics.getEntries();
      assertEquals(0, cacheEntries.size());
   }

   protected void beginTx() throws Exception {
      tm.begin();
   }

   protected void setRollbackOnlyTx() throws Exception {
      tm.setRollbackOnly();
   }

   /** Marks the current tx rollback-only, logs the failure and rethrows it. */
   protected void setRollbackOnlyTx(Exception e) throws Exception {
      log.error("Error", e);
      tm.setRollbackOnly();
      throw e;
   }

   /** Marks rollback-only for a failure the test anticipated; does not rethrow. */
   protected void setRollbackOnlyTxExpected(Exception e) throws Exception {
      // FIX: typo in log message ("behaivour")
      log.debug("Expected behaviour", e);
      tm.setRollbackOnly();
   }

   /** Commits if the tx is still active, otherwise rolls it back. */
   protected void commitOrRollbackTx() throws Exception {
      if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
      else tm.rollback();
   }
}

View File

@ -25,20 +25,25 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.transaction.Status;
import javax.transaction.TransactionManager;
import org.hibernate.FlushMode;
import org.hibernate.cache.RegionFactory;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.classic.Session;
import org.hibernate.connection.ConnectionProvider;
import org.hibernate.junit.functional.FunctionalTestCase;
import org.hibernate.stat.SecondLevelCacheStatistics;
import org.hibernate.test.cache.infinispan.functional.Contact;
import org.hibernate.test.cache.infinispan.functional.Customer;
import org.hibernate.transaction.CMTTransactionFactory;
import org.hibernate.transaction.TransactionFactory;
import org.hibernate.transaction.TransactionManagerLookup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* BulkOperationsTestCase.
@ -47,8 +52,7 @@ import org.slf4j.LoggerFactory;
* @since 3.5
*/
public class BulkOperationsTestCase extends FunctionalTestCase {
private static final Logger log = LoggerFactory.getLogger(BulkOperationsTestCase.class);
private static final Log log = LogFactory.getLog(BulkOperationsTestCase.class);
private TransactionManager tm;
@ -66,11 +70,15 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
return "transactional";
}
protected Class getTransactionFactoryClass() {
protected Class<? extends RegionFactory> getCacheRegionFactory() {
return InfinispanRegionFactory.class;
}
protected Class<? extends TransactionFactory> getTransactionFactoryClass() {
return CMTTransactionFactory.class;
}
protected Class getConnectionProviderClass() {
protected Class<? extends ConnectionProvider> getConnectionProviderClass() {
return org.hibernate.test.cache.infinispan.tm.XaConnectionProvider.class;
}
@ -80,20 +88,17 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
public void configure(Configuration cfg) {
super.configure(cfg);
cfg.setProperty(Environment.USE_SECOND_LEVEL_CACHE, "true");
cfg.setProperty(Environment.GENERATE_STATISTICS, "true");
cfg.setProperty(Environment.USE_QUERY_CACHE, "false");
cfg.setProperty(Environment.CACHE_REGION_FACTORY, getCacheRegionFactory().getName());
cfg.setProperty(Environment.CONNECTION_PROVIDER, getConnectionProviderClass().getName());
cfg.setProperty(Environment.TRANSACTION_MANAGER_STRATEGY, getTransactionManagerLookupClass()
.getName());
Class transactionFactory = getTransactionFactoryClass();
cfg.setProperty(Environment.TRANSACTION_STRATEGY, transactionFactory.getName());
cfg.setProperty(Environment.TRANSACTION_MANAGER_STRATEGY, getTransactionManagerLookupClass().getName());
cfg.setProperty(Environment.TRANSACTION_STRATEGY, getTransactionFactoryClass().getName());
}
public void testBulkOperations() throws Throwable {
System.out.println("*** testBulkOperations()");
log.info("*** testBulkOperations()");
boolean cleanedUp = false;
try {
tm = getTransactionManagerLookupClass().newInstance().getTransactionManager(null);
@ -166,13 +171,14 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
log.debug("Create 10 contacts");
tm.begin();
try {
for (int i = 0; i < 10; i++)
createCustomer(i);
tm.commit();
for (int i = 0; i < 10; i++) createCustomer(i);
} catch (Exception e) {
log.error("Unable to create customer", e);
tm.rollback();
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
@ -183,19 +189,25 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
tm.begin();
try {
Session session = getSessions().getCurrentSession();
int rowsAffected = session.createQuery(deleteHQL).setFlushMode(FlushMode.AUTO)
.setParameter("cName", "Red Hat").executeUpdate();
tm.commit();
return rowsAffected;
} catch (Exception e) {
try {
tm.rollback();
} catch (Exception ee) {
// ignored
}
log.error("Unable to delete contac", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) {
tm.commit();
} else {
try {
tm.rollback();
} catch (Exception ee) {
// ignored
}
}
}
}
@ -210,11 +222,14 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
Session session = getSessions().getCurrentSession();
List results = session.createQuery(selectHQL).setFlushMode(FlushMode.AUTO).setParameter(
"cName", customerName).list();
tm.commit();
return results;
} catch (Exception e) {
tm.rollback();
log.error("Unable to get contacts by customer", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
@ -224,15 +239,17 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
tm.begin();
try {
Session session = getSessions().getCurrentSession();
List results = session.createQuery(selectHQL).setFlushMode(FlushMode.AUTO).setParameter(
"cTLF", tlf).list();
tm.commit();
return results;
} catch (Exception e) {
tm.rollback();
log.error("Unable to get contacts", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
@ -243,11 +260,14 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
Session session = getSessions().getCurrentSession();
int rowsAffected = session.createQuery(updateHQL).setFlushMode(FlushMode.AUTO)
.setParameter("cNewTLF", newTLF).setParameter("cName", name).executeUpdate();
tm.commit();
return rowsAffected;
} catch (Exception e) {
tm.rollback();
log.error("Unable to update contacts", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
@ -262,25 +282,30 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
list.get(0).setTlf(newTLF);
int rowsAffected = session.createQuery(updateHQL).setFlushMode(FlushMode.AUTO)
.setParameter("cNewTLF", newTLF).setParameter("cName", name).executeUpdate();
tm.commit();
return rowsAffected;
} catch (Exception e) {
tm.rollback();
log.error("Unable to update contacts with one manual", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
public Contact getContact(Integer id) throws Exception {
tm.begin();
try {
Session session = getSessions().getCurrentSession();
Contact contact = (Contact) session.get(Contact.class, id);
tm.commit();
return contact;
} catch (Exception e) {
tm.rollback();
log.error("Unable to get contact", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}
@ -292,15 +317,21 @@ public class BulkOperationsTestCase extends FunctionalTestCase {
Session session = getSessions().getCurrentSession();
session.createQuery(deleteContactHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
session.createQuery(deleteCustomerHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
tm.commit();
} catch (Exception e) {
if (!ignore) {
try {
tm.rollback();
} catch (Exception ee) {
// ignored
log.error("Unable to get contact", e);
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) {
tm.commit();
} else {
if (!ignore) {
try {
tm.rollback();
} catch (Exception ee) {
// ignored
}
}
throw e;
}
}
}

View File

@ -30,8 +30,8 @@ import javax.transaction.TransactionManager;
import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Comment
@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
* @author Brian Stansberry
*/
public class ClassLoaderTestDAO {
private static final Logger log = LoggerFactory.getLogger(ClassLoaderTestDAO.class);
private static final Log log = LogFactory.getLog(ClassLoaderTestDAO.class);
private SessionFactory sessionFactory;
private TransactionManager tm;

View File

@ -30,7 +30,7 @@ import org.hibernate.SessionFactory;
import org.hibernate.cache.StandardQueryCache;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.test.cache.infinispan.functional.cluster.AbstractDualNodeTestCase;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeTestCase;
import org.hibernate.test.cache.infinispan.functional.cluster.ClusterAwareRegionFactory;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeJtaTransactionManagerImpl;
import org.infinispan.Cache;
@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory;
* @author Galder Zamarreño
* @since 3.5
*/
public class IsolatedClassLoaderTest extends AbstractDualNodeTestCase {
public class IsolatedClassLoaderTest extends DualNodeTestCase {
public static final String OUR_PACKAGE = IsolatedClassLoaderTest.class.getPackage().getName();
@ -119,11 +119,11 @@ public class IsolatedClassLoaderTest extends AbstractDualNodeTestCase {
public void testIsolatedSetup() throws Exception {
// Bind a listener to the "local" cache
// Our region factory makes its CacheManager available to us
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.LOCAL);
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.LOCAL);
Cache localReplicatedCache = localManager.getCache("replicated-entity");
// Bind a listener to the "remote" cache
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.REMOTE);
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.REMOTE);
Cache remoteReplicatedCache = remoteManager.getCache("replicated-entity");
ClassLoader cl = Thread.currentThread().getContextClassLoader();
@ -163,20 +163,20 @@ public class IsolatedClassLoaderTest extends AbstractDualNodeTestCase {
protected void queryTest(boolean useNamedRegion) throws Exception {
// Bind a listener to the "local" cache
// Our region factory makes its CacheManager available to us
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.LOCAL);
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.LOCAL);
localQueryCache = localManager.getCache("replicated-query");
localQueryListener = new CacheAccessListener();
localQueryCache.addListener(localQueryListener);
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.LOCAL);
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.LOCAL);
// Bind a listener to the "remote" cache
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.REMOTE);
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.REMOTE);
remoteQueryCache = remoteManager.getCache("replicated-query");
remoteQueryListener = new CacheAccessListener();
remoteQueryCache.addListener(remoteQueryListener);
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.REMOTE);
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.REMOTE);
SessionFactory localFactory = getEnvironment().getSessionFactory();
SessionFactory remoteFactory = getSecondNodeEnvironment().getSessionFactory();

View File

@ -34,8 +34,8 @@ import org.hibernate.cache.TimestampsRegion;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Settings;
import org.infinispan.manager.CacheManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* ClusterAwareRegionFactory.
@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
*/
public class ClusterAwareRegionFactory implements RegionFactory {
private static final Logger log = LoggerFactory.getLogger(ClusterAwareRegionFactory.class);
private static final Log log = LogFactory.getLog(ClusterAwareRegionFactory.class);
private static final Hashtable<String, CacheManager> cacheManagers = new Hashtable<String, CacheManager>();
private final InfinispanRegionFactory delegate = new InfinispanRegionFactory();
@ -75,7 +75,7 @@ public class ClusterAwareRegionFactory implements RegionFactory {
}
public void start(Settings settings, Properties properties) throws CacheException {
cacheManagerName = properties.getProperty(AbstractDualNodeTestCase.NODE_ID_PROP);
cacheManagerName = properties.getProperty(DualNodeTestCase.NODE_ID_PROP);
CacheManager existing = getCacheManager(cacheManagerName);
locallyAdded = (existing == null);

View File

@ -47,9 +47,9 @@ public class DualNodeConnectionProviderImpl implements ConnectionProvider {
}
public void configure(Properties props) throws HibernateException {
nodeId = props.getProperty(AbstractDualNodeTestCase.NODE_ID_PROP);
nodeId = props.getProperty(DualNodeTestCase.NODE_ID_PROP);
if (nodeId == null)
throw new HibernateException(AbstractDualNodeTestCase.NODE_ID_PROP + " not configured");
throw new HibernateException(DualNodeTestCase.NODE_ID_PROP + " not configured");
}
public Connection getConnection() throws SQLException {

View File

@ -42,8 +42,8 @@ import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* SimpleJtaTransactionImpl variant that works with DualNodeTransactionManagerImpl.
@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory;
* @author Brian Stansberry
*/
public class DualNodeJtaTransactionImpl implements Transaction {
private static final Logger log = LoggerFactory.getLogger(DualNodeJtaTransactionImpl.class);
private static final Log log = LogFactory.getLog(DualNodeJtaTransactionImpl.class);
private int status;
private LinkedList synchronizations;

View File

@ -35,8 +35,8 @@ import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Variant of SimpleJtaTransactionManagerImpl that doesn't use a VM-singleton, but rather a set of
@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory;
*/
public class DualNodeJtaTransactionManagerImpl implements TransactionManager {
private static final Logger log = LoggerFactory.getLogger(DualNodeJtaTransactionManagerImpl.class);
private static final Log log = LogFactory.getLog(DualNodeJtaTransactionManagerImpl.class);
private static final Hashtable INSTANCES = new Hashtable();

View File

@ -21,10 +21,7 @@
*/
package org.hibernate.test.cache.infinispan.functional.cluster;
import java.util.Set;
import org.hibernate.Session;
import org.hibernate.cache.infinispan.util.CacheHelper;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.cfg.Mappings;
@ -33,8 +30,8 @@ import org.hibernate.engine.SessionFactoryImplementor;
import org.hibernate.junit.functional.ExecutionEnvironment;
import org.hibernate.junit.functional.FunctionalTestCase;
import org.hibernate.transaction.CMTTransactionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* AbstractDualNodeTestCase.
@ -42,16 +39,16 @@ import org.slf4j.LoggerFactory;
* @author Galder Zamarreño
* @since 3.5
*/
public abstract class AbstractDualNodeTestCase extends FunctionalTestCase {
public abstract class DualNodeTestCase extends FunctionalTestCase {
private static final Logger log = LoggerFactory.getLogger(AbstractDualNodeTestCase.class);
private static final Log log = LogFactory.getLog(DualNodeTestCase.class);
public static final String NODE_ID_PROP = "hibernate.test.cluster.node.id";
public static final String LOCAL = "local";
public static final String REMOTE = "remote";
private ExecutionEnvironment secondNodeEnvironment;
private Session secondNodeSession;
public AbstractDualNodeTestCase(String string) {
public DualNodeTestCase(String string) {
super(string);
}
@ -164,6 +161,10 @@ public abstract class AbstractDualNodeTestCase extends FunctionalTestCase {
}
}
protected boolean getUseQueryCache() {
return true;
}
protected void standardConfigure(Configuration cfg) {
super.configure(cfg);
@ -171,6 +172,7 @@ public abstract class AbstractDualNodeTestCase extends FunctionalTestCase {
cfg.setProperty(Environment.TRANSACTION_MANAGER_STRATEGY, getTransactionManagerLookupClass().getName());
cfg.setProperty(Environment.TRANSACTION_STRATEGY, getTransactionFactoryClass().getName());
cfg.setProperty(Environment.CACHE_REGION_FACTORY, getCacheRegionFactory().getName());
cfg.setProperty(Environment.USE_QUERY_CACHE, String.valueOf(getUseQueryCache()));
}
/**
@ -178,10 +180,10 @@ public abstract class AbstractDualNodeTestCase extends FunctionalTestCase {
* configure method to allow separate cache settings for the second node.
*/
public class SecondNodeSettings implements ExecutionEnvironment.Settings {
private final AbstractDualNodeTestCase delegate;
private final DualNodeTestCase delegate;
public SecondNodeSettings() {
this.delegate = AbstractDualNodeTestCase.this;
this.delegate = DualNodeTestCase.this;
}
/**

View File

@ -39,9 +39,9 @@ import org.hibernate.HibernateException;
public class DualNodeTransactionManagerLookup implements TransactionManagerLookup {
public TransactionManager getTransactionManager(Properties props) throws HibernateException {
String nodeId = props.getProperty(AbstractDualNodeTestCase.NODE_ID_PROP);
String nodeId = props.getProperty(DualNodeTestCase.NODE_ID_PROP);
if (nodeId == null)
throw new HibernateException(AbstractDualNodeTestCase.NODE_ID_PROP + " not configured");
throw new HibernateException(DualNodeTestCase.NODE_ID_PROP + " not configured");
return DualNodeJtaTransactionManagerImpl.getInstance(nodeId);
}

View File

@ -39,8 +39,8 @@ import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited;
import org.infinispan.notifications.cachelistener.event.CacheEntryVisitedEvent;
import org.jboss.util.collection.ConcurrentSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* EntityCollectionInvalidationTestCase.
@ -48,8 +48,8 @@ import org.slf4j.LoggerFactory;
* @author Galder Zamarreño
* @since 3.5
*/
public class EntityCollectionInvalidationTestCase extends AbstractDualNodeTestCase {
private static final Logger log = LoggerFactory.getLogger(EntityCollectionInvalidationTestCase.class);
public class EntityCollectionInvalidationTestCase extends DualNodeTestCase {
private static final Log log = LogFactory.getLog(EntityCollectionInvalidationTestCase.class);
private static final long SLEEP_TIME = 50l;
private static final Integer CUSTOMER_ID = new Integer(1);
static int test = 0;
@ -67,7 +67,7 @@ public class EntityCollectionInvalidationTestCase extends AbstractDualNodeTestCa
// Bind a listener to the "local" cache
// Our region factory makes its CacheManager available to us
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.LOCAL);
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.LOCAL);
// Cache localCache = localManager.getCache("entity");
Cache localCustomerCache = localManager.getCache(Customer.class.getName());
Cache localContactCache = localManager.getCache(Contact.class.getName());
@ -76,10 +76,10 @@ public class EntityCollectionInvalidationTestCase extends AbstractDualNodeTestCa
localCustomerCache.addListener(localListener);
localContactCache.addListener(localListener);
localCollectionCache.addListener(localListener);
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.LOCAL);
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.LOCAL);
// Bind a listener to the "remote" cache
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.REMOTE);
CacheManager remoteManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.REMOTE);
Cache remoteCustomerCache = remoteManager.getCache(Customer.class.getName());
Cache remoteContactCache = remoteManager.getCache(Contact.class.getName());
Cache remoteCollectionCache = remoteManager.getCache(Customer.class.getName() + ".contacts");
@ -87,7 +87,7 @@ public class EntityCollectionInvalidationTestCase extends AbstractDualNodeTestCa
remoteCustomerCache.addListener(remoteListener);
remoteContactCache.addListener(remoteListener);
remoteCollectionCache.addListener(remoteListener);
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.REMOTE);
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.REMOTE);
SessionFactory localFactory = getEnvironment().getSessionFactory();
SessionFactory remoteFactory = getSecondNodeEnvironment().getSessionFactory();
@ -319,7 +319,7 @@ public class EntityCollectionInvalidationTestCase extends AbstractDualNodeTestCa
@Listener
public static class MyListener {
private static final Logger log = LoggerFactory.getLogger(MyListener.class);
private static final Log log = LogFactory.getLog(MyListener.class);
private Set<String> visited = new ConcurrentSet<String>();
private final String name;

View File

@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory;
* @author Galder Zamarreño
* @since 3.5
*/
public class SessionRefreshTestCase extends AbstractDualNodeTestCase {
public class SessionRefreshTestCase extends DualNodeTestCase {
public static final String OUR_PACKAGE = SessionRefreshTestCase.class.getPackage().getName();
@ -90,41 +90,41 @@ public class SessionRefreshTestCase extends AbstractDualNodeTestCase {
public void testRefreshAfterExternalChange() throws Exception {
// First session factory uses a cache
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(AbstractDualNodeTestCase.LOCAL);
CacheManager localManager = ClusterAwareRegionFactory.getCacheManager(DualNodeTestCase.LOCAL);
localCache = localManager.getCache(Account.class.getName());
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.LOCAL);
TransactionManager localTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.LOCAL);
SessionFactory localFactory = getEnvironment().getSessionFactory();
// Second session factory doesn't; just needs a transaction manager
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(AbstractDualNodeTestCase.REMOTE);
TransactionManager remoteTM = DualNodeJtaTransactionManagerImpl.getInstance(DualNodeTestCase.REMOTE);
SessionFactory remoteFactory = getSecondNodeEnvironment().getSessionFactory();
ClassLoaderTestDAO dao0 = new ClassLoaderTestDAO(localFactory, localTM);
ClassLoaderTestDAO dao1 = new ClassLoaderTestDAO(remoteFactory, remoteTM);
Integer id = new Integer(1);
dao0.createAccount(dao0.getSmith(), id, new Integer(5), AbstractDualNodeTestCase.LOCAL);
dao0.createAccount(dao0.getSmith(), id, new Integer(5), DualNodeTestCase.LOCAL);
// Basic sanity check
Account acct1 = dao1.getAccount(id);
assertNotNull(acct1);
assertEquals(AbstractDualNodeTestCase.LOCAL, acct1.getBranch());
assertEquals(DualNodeTestCase.LOCAL, acct1.getBranch());
// This dao's session factory isn't caching, so cache won't see this change
dao1.updateAccountBranch(id, AbstractDualNodeTestCase.REMOTE);
dao1.updateAccountBranch(id, DualNodeTestCase.REMOTE);
// dao1's session doesn't touch the cache,
// so reading from dao0 should show a stale value from the cache
// (we check to confirm the cache is used)
Account acct0 = dao0.getAccount(id);
assertNotNull(acct0);
assertEquals(AbstractDualNodeTestCase.LOCAL, acct0.getBranch());
assertEquals(DualNodeTestCase.LOCAL, acct0.getBranch());
log.debug("Contents when re-reading from local: " + TestingUtil.printCache(localCache));
// Now call session.refresh and confirm we get the correct value
acct0 = dao0.getAccountWithRefresh(id);
assertNotNull(acct0);
assertEquals(AbstractDualNodeTestCase.REMOTE, acct0.getBranch());
assertEquals(DualNodeTestCase.REMOTE, acct0.getBranch());
log.debug("Contents after refreshing in remote: " + TestingUtil.printCache(localCache));
// Double check with a brand new session, in case the other session
@ -132,7 +132,7 @@ public class SessionRefreshTestCase extends AbstractDualNodeTestCase {
ClassLoaderTestDAO dao0A = new ClassLoaderTestDAO(localFactory, localTM);
Account acct0A = dao0A.getAccount(id);
assertNotNull(acct0A);
assertEquals(AbstractDualNodeTestCase.REMOTE, acct0A.getBranch());
assertEquals(DualNodeTestCase.REMOTE, acct0A.getBranch());
log.debug("Contents after creating a new session: " + TestingUtil.printCache(localCache));
}
}

View File

@ -40,8 +40,8 @@ import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* XaResourceCapableTransactionImpl.
@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
* @since 3.5
*/
public class XaTransactionImpl implements Transaction {
private static final Logger log = LoggerFactory.getLogger(XaTransactionImpl.class);
private static final Log log = LogFactory.getLog(XaTransactionImpl.class);
private int status;
private LinkedList synchronizations;
private Connection connection; // the only resource we care about is jdbc connection

View File

@ -34,7 +34,3 @@ hibernate.format_sql true
hibernate.max_fetch_depth 5
hibernate.generate_statistics true
hibernate.cache.use_second_level_cache true
hibernate.cache.use_query_cache true
hibernate.cache.region.factory_class org.hibernate.cache.infinispan.InfinispanRegionFactory