HHH-7640 Improve single node Infinispan 2LC performance
* Use an Infinispan cache to maintain pending puts, which avoids the need to control its memory consumption in the critical path of putFromLoad (a minimal configuration sketch follows the commit metadata below).
* This cache is shared by all regions and is configured with aggressive expiration settings to avoid piling up pending put operations.
* Added a 2LC stress test that exercises the behaviour and performance of the 2LC under multiple situations, such as entity inserts, updates, finds via PK, finds via query, and deletes.
* Some other minor performance enhancements, such as avoiding the classloader-aware cache wrapper when the 2LC is used locally.
* Remove the cache adapter to reduce construction of useless objects.
* Cache flagged caches in order to avoid recomputing decorated caches all the time, which reduces memory consumption.
* Skip locking for timestamp updates and use separate timestamp region implementations for local vs clustered scenarios.
This commit is contained in:
parent 2b4097aa49
commit a074d3244d
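For orientation, here is a minimal, illustrative sketch of the pending-puts approach described above, assuming the Infinispan 5.1-era API used in the diff below. The class name PendingPutsConfigSketch and the standalone main method are hypothetical; the configuration itself mirrors definePendingPutsCache() in InfinispanRegionFactory further down in this diff: one local, non-transactional cache with aggressive expiration is registered under the name "pending-puts" and then shared by every region's PutFromLoadValidator.

import java.util.concurrent.TimeUnit;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.concurrent.IsolationLevel;

public class PendingPutsConfigSketch {
   public static void main(String[] args) {
      // Hypothetical standalone driver; in the real change the region factory does this once at start().
      EmbeddedCacheManager manager = new DefaultCacheManager();
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.clustering().cacheMode(CacheMode.LOCAL)                        // never replicated
            .transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL)
            .expiration().maxIdle(TimeUnit.SECONDS.toMillis(60))             // idle entries expire aggressively
            .storeAsBinary().enabled(false)
            .locking().isolationLevel(IsolationLevel.READ_COMMITTED)
            .jmxStatistics().disable();
      manager.defineConfiguration("pending-puts", builder.build());
      // Each PutFromLoadValidator then looks the shared cache up by name and uses it as a ConcurrentMap,
      // so expiration bounds its memory instead of the old queue cleaning done inside putFromLoad.
      org.infinispan.Cache<Object, Object> pendingPuts = manager.getCache("pending-puts");
      pendingPuts.putIfAbsent("someKey", "somePendingPutMap");
      manager.stop();
   }
}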
@@ -32,3 +32,10 @@ bin
# Miscellaneous
*.log
.clover

# JBoss Transactions
ObjectStore

# Profiler and heap dumps
*.jps
*.hprof
|
@@ -61,6 +61,8 @@ public class StandardQueryCache implements QueryCache {
StandardQueryCache.class.getName()
);

private static final boolean tracing = LOG.isTraceEnabled();

private QueryResultsRegion cacheRegion;
private UpdateTimestampsCache updateTimestampsCache;
@@ -241,7 +243,7 @@ public class StandardQueryCache implements QueryCache {
}

private static void logCachedResultRowDetails(Type[] returnTypes, Object[] tuple) {
if ( !LOG.isTraceEnabled() ) {
if ( !tracing ) {
return;
}
if ( tuple == null ) {
|
@@ -94,6 +94,8 @@ public class StatefulPersistenceContext implements PersistenceContext {

private static final CoreMessageLogger LOG = Logger.getMessageLogger( CoreMessageLogger.class, StatefulPersistenceContext.class.getName() );

private static final boolean tracing = LOG.isTraceEnabled();

public static final Object NO_ROW = new MarkerObject( "NO_ROW" );

private static final int INIT_COLL_SIZE = 8;
@@ -1004,7 +1006,9 @@ public class StatefulPersistenceContext implements PersistenceContext {
@Override
public void initializeNonLazyCollections() throws HibernateException {
if ( loadCounter == 0 ) {
LOG.debug( "Initializing non-lazy collections" );
if (tracing)
LOG.trace( "Initializing non-lazy collections" );

//do this work only at the very highest level of the load
loadCounter++; //don't let this method be called recursively
try {
|
@@ -181,6 +181,8 @@ public final class SessionImpl extends AbstractSessionImpl implements EventSourc

private static final CoreMessageLogger LOG = Logger.getMessageLogger(CoreMessageLogger.class, SessionImpl.class.getName());

private static final boolean tracing = LOG.isTraceEnabled();

private transient long timestamp;

private transient SessionOwner sessionOwner;
@@ -310,7 +312,8 @@ public final class SessionImpl extends AbstractSessionImpl implements EventSourc
factory.getStatisticsImplementor().openSession();
}

LOG.debugf( "Opened session at timestamp: %s", timestamp );
if (tracing)
LOG.tracef( "Opened session at timestamp: %s", timestamp );
}

@Override
|
@@ -13,6 +13,7 @@ dependencies {
testCompile( libraries.jnp_client )
testCompile( libraries.jnp_server )
testCompile( libraries.rhq )
testCompile ('mysql:mysql-connector-java:5.1.17')
}

test {
@@ -27,3 +28,15 @@ test {
enabled = true
}

task packageTests(type: Jar) {
from sourceSets.test.output
classifier = 'tests'
}

task sourcesTestJar(type: Jar, dependsOn:classes) {
from sourceSets.test.allSource
classifier = 'test-sources'
}

artifacts.archives packageTests
artifacts.archives sourcesTestJar
|
@@ -10,15 +10,20 @@ import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import javax.transaction.TransactionManager;
import java.util.concurrent.TimeUnit;

import org.hibernate.cache.infinispan.timestamp.ClusteredTimestampsRegionImpl;
import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.module.ModuleCommandFactory;
import org.infinispan.config.Configuration;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
@@ -34,8 +39,6 @@ import org.hibernate.cache.infinispan.query.QueryResultsRegionImpl;
import org.hibernate.cache.infinispan.timestamp.TimestampTypeOverrides;
import org.hibernate.cache.infinispan.timestamp.TimestampsRegionImpl;
import org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.cache.spi.CollectionRegion;
import org.hibernate.cache.spi.EntityRegion;
import org.hibernate.cache.spi.NaturalIdRegion;
@@ -166,6 +169,11 @@ public class InfinispanRegionFactory implements RegionFactory {
*/
public static final boolean DEF_USE_SYNCHRONIZATION = true;

/**
* Name of the pending puts cache.
*/
public static final String PENDING_PUTS_CACHE_NAME = "pending-puts";

private EmbeddedCacheManager manager;

private final Map<String, TypeOverrides> typeOverrides = new HashMap<String, TypeOverrides>();
@@ -174,8 +182,6 @@ public class InfinispanRegionFactory implements RegionFactory {

private org.infinispan.transaction.lookup.TransactionManagerLookup transactionManagerlookup;

private TransactionManager transactionManager;

private List<String> regionNames = new ArrayList<String>();

/**
@@ -197,8 +203,8 @@ public class InfinispanRegionFactory implements RegionFactory {
public CollectionRegion buildCollectionRegion(String regionName, Properties properties, CacheDataDescription metadata) throws CacheException {
if (log.isDebugEnabled()) log.debug("Building collection cache region [" + regionName + "]");
AdvancedCache cache = getCache(regionName, COLLECTION_KEY, properties);
CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
CollectionRegionImpl region = new CollectionRegionImpl(cacheAdapter, regionName, metadata, transactionManager, this);
CollectionRegionImpl region = new CollectionRegionImpl(
cache, regionName, metadata, this);
startRegion(region, regionName);
return region;
}
@@ -207,8 +213,8 @@ public class InfinispanRegionFactory implements RegionFactory {
public EntityRegion buildEntityRegion(String regionName, Properties properties, CacheDataDescription metadata) throws CacheException {
if (log.isDebugEnabled()) log.debug("Building entity cache region [" + regionName + "]");
AdvancedCache cache = getCache(regionName, ENTITY_KEY, properties);
CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
EntityRegionImpl region = new EntityRegionImpl(cacheAdapter, regionName, metadata, transactionManager, this);
EntityRegionImpl region = new EntityRegionImpl(
cache, regionName, metadata, this);
startRegion(region, regionName);
return region;
}
@@ -216,19 +222,13 @@ public class InfinispanRegionFactory implements RegionFactory {
@Override
public NaturalIdRegion buildNaturalIdRegion(String regionName, Properties properties, CacheDataDescription metadata)
throws CacheException {
if ( log.isDebugEnabled() ) {
log.debug( "Building natural id cache region [" + regionName + "]" );
if (log.isDebugEnabled()) {
log.debug("Building natural id cache region [" + regionName + "]");
}
AdvancedCache cache = getCache( regionName, NATURAL_ID_KEY, properties );
CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance( cache );
AdvancedCache cache = getCache(regionName, NATURAL_ID_KEY, properties);
NaturalIdRegionImpl region = new NaturalIdRegionImpl(
cacheAdapter,
regionName,
metadata,
transactionManager,
this
);
startRegion( region, regionName );
cache, regionName, metadata, this);
startRegion(region, regionName);
return region;
}

@@ -244,8 +244,8 @@ public class InfinispanRegionFactory implements RegionFactory {
cacheName = regionName;

AdvancedCache cache = getCache(cacheName, QUERY_KEY, properties);
CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
QueryResultsRegionImpl region = new QueryResultsRegionImpl(cacheAdapter, regionName, properties, transactionManager, this);
QueryResultsRegionImpl region = new QueryResultsRegionImpl(
cache, regionName, this);
startRegion(region, regionName);
return region;
}
@@ -257,14 +257,17 @@ public class InfinispanRegionFactory implements RegionFactory {
throws CacheException {
if (log.isDebugEnabled()) log.debug("Building timestamps cache region [" + regionName + "]");
AdvancedCache cache = getCache(regionName, TIMESTAMPS_KEY, properties);
CacheAdapter cacheAdapter = CacheAdapterImpl.newInstance(cache);
TimestampsRegionImpl region = createTimestampsRegion(cacheAdapter, regionName);
TimestampsRegionImpl region = createTimestampsRegion(cache, regionName);
startRegion(region, regionName);
return region;
}

protected TimestampsRegionImpl createTimestampsRegion(CacheAdapter cacheAdapter, String regionName) {
return new TimestampsRegionImpl(cacheAdapter, regionName, transactionManager, this);
protected TimestampsRegionImpl createTimestampsRegion(
AdvancedCache cache, String regionName) {
if (Caches.isClustered(cache))
return new ClusteredTimestampsRegionImpl(cache, regionName, this);
else
return new TimestampsRegionImpl(cache, regionName, this);
}

/**
@@ -301,7 +304,6 @@ public class InfinispanRegionFactory implements RegionFactory {
log.debug("Starting Infinispan region factory");
try {
transactionManagerlookup = createTransactionManagerLookup(settings, properties);
transactionManager = transactionManagerlookup.getTransactionManager();
manager = createCacheManager(properties);
initGenericDataTypeOverrides();
Enumeration keys = properties.propertyNames();
@@ -313,6 +315,7 @@ public class InfinispanRegionFactory implements RegionFactory {
}
}
defineGenericDataTypeCacheConfigurations(settings, properties);
definePendingPutsCache();
} catch (CacheException ce) {
throw ce;
} catch (Throwable t) {
@@ -320,6 +323,22 @@ public class InfinispanRegionFactory implements RegionFactory {
}
}

private void definePendingPutsCache() {
ConfigurationBuilder builder = new ConfigurationBuilder();
// A local, lightweight cache for pending puts, which is
// non-transactional and has aggressive expiration settings.
// Locking is still required since the putFromLoad validator
// code uses conditional operations (i.e. putIfAbsent).
builder.clustering().cacheMode(CacheMode.LOCAL)
.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL)
.expiration().maxIdle(TimeUnit.SECONDS.toMillis(60))
.storeAsBinary().enabled(false)
.locking().isolationLevel(IsolationLevel.READ_COMMITTED)
.jmxStatistics().disable();

manager.defineConfiguration(PENDING_PUTS_CACHE_NAME, builder.build());
}

protected org.infinispan.transaction.lookup.TransactionManagerLookup createTransactionManagerLookup(
Settings settings, Properties properties) {
return new HibernateTransactionManagerLookup(settings, properties);
@@ -336,7 +355,8 @@ public class InfinispanRegionFactory implements RegionFactory {

protected void stopCacheRegions() {
log.debug("Clear region references");
getCacheCommandFactory(manager.getCache()).clearRegions(regionNames);
getCacheCommandFactory(manager.getCache().getAdvancedCache())
.clearRegions(regionNames);
regionNames.clear();
}

@@ -376,8 +396,7 @@ public class InfinispanRegionFactory implements RegionFactory {

private void startRegion(BaseRegion region, String regionName) {
regionNames.add(regionName);
getCacheCommandFactory(region.getCacheAdapter().getCache())
.addRegion(regionName, region);
getCacheCommandFactory(region.getCache()).addRegion(regionName, region);
}

private Map<String, TypeOverrides> initGenericDataTypeOverrides() {
@@ -487,11 +506,14 @@ public class InfinispanRegionFactory implements RegionFactory {
return createCacheWrapper(cache);
}

private CacheCommandFactory getCacheCommandFactory(Cache cache) {
GlobalComponentRegistry globalCr = cache.getAdvancedCache()
.getComponentRegistry().getGlobalComponentRegistry();
private CacheCommandFactory getCacheCommandFactory(AdvancedCache cache) {
GlobalComponentRegistry globalCr = cache.getComponentRegistry()
.getGlobalComponentRegistry();

Map<Byte, ModuleCommandFactory> factories =
(Map<Byte, ModuleCommandFactory>) globalCr.getComponent("org.infinispan.modules.command.factories");
(Map<Byte, ModuleCommandFactory>) globalCr
.getComponent("org.infinispan.modules.command.factories");

for (ModuleCommandFactory factory : factories.values()) {
if (factory instanceof CacheCommandFactory)
return (CacheCommandFactory) factory;
@@ -503,7 +525,11 @@ public class InfinispanRegionFactory implements RegionFactory {
}

protected AdvancedCache createCacheWrapper(AdvancedCache cache) {
return new ClassLoaderAwareCache(cache, Thread.currentThread().getContextClassLoader());
if (Caches.isClustered(cache))
return new ClassLoaderAwareCache(cache,
Thread.currentThread().getContextClassLoader());

return cache;
}

private Configuration configureTransactionManager(Configuration regionOverrides, String templateCacheName, Properties properties) {
|
@@ -23,7 +23,6 @@
*/
package org.hibernate.cache.infinispan.access;

import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
@@ -38,6 +37,9 @@ import javax.transaction.Transaction;
import javax.transaction.TransactionManager;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.infinispan.AdvancedCache;
import org.infinispan.manager.EmbeddedCacheManager;

/**
* Encapsulates logic to allow a {@link TransactionalAccessDelegate} to determine
@@ -91,42 +93,19 @@ public class PutFromLoadValidator {
*/
public static final long NAKED_PUT_INVALIDATION_PERIOD = TimeUnit.SECONDS.toMillis(20);

/** Period (in ms) after which a pending put is placed in the over-age queue */
private static final long PENDING_PUT_OVERAGE_PERIOD = TimeUnit.SECONDS.toMillis(5);

/** Period (in ms) before which we stop trying to clean out pending puts */
private static final long PENDING_PUT_RECENT_PERIOD = TimeUnit.SECONDS.toMillis(2);

/** Period (in ms) after which a pending put is never expected to come in and should be cleaned */
private static final long MAX_PENDING_PUT_DELAY = TimeUnit.SECONDS.toMillis(2 * 60);

/**
* Used to determine whether the owner of a pending put is a thread or a transaction
*/
private final TransactionManager transactionManager;

private final long nakedPutInvalidationPeriod;
private final long pendingPutOveragePeriod;
private final long pendingPutRecentPeriod;
private final long maxPendingPutDelay;

/**
* Registry of expected, future, isPutValid calls. If a key+owner is registered in this map, it
* is not a "naked put" and is allowed to proceed.
*/
private final ConcurrentMap<Object, PendingPutMap> pendingPuts = new ConcurrentHashMap<Object, PendingPutMap>();
/**
* List of pending puts. Used to ensure we don't leak memory via the pendingPuts map
*/
private final List<WeakReference<PendingPut>> pendingQueue = new LinkedList<WeakReference<PendingPut>>();
/**
* Separate list of pending puts that haven't been resolved within PENDING_PUT_OVERAGE_PERIOD.
* Used to ensure we don't leak memory via the pendingPuts map. Tracked separately from more
* recent pending puts for efficiency reasons.
*/
private final List<WeakReference<PendingPut>> overagePendingQueue = new LinkedList<WeakReference<PendingPut>>();
/** Lock controlling access to pending put queues */
private final Lock pendingLock = new ReentrantLock();
private final ConcurrentMap<Object, PendingPutMap> pendingPuts;

private final ConcurrentMap<Object, Long> recentRemovals = new ConcurrentHashMap<Object, Long>();
/**
* List of recent removals. Used to ensure we don't leak memory via the recentRemovals map
@@ -148,27 +127,26 @@ public class PutFromLoadValidator {

/**
* Creates a new PutFromLoadValidator.
*
* @param transactionManager
* transaction manager to use to associate changes with a transaction; may be
* <code>null</code>
*/
public PutFromLoadValidator(TransactionManager transactionManager) {
this(transactionManager, NAKED_PUT_INVALIDATION_PERIOD, PENDING_PUT_OVERAGE_PERIOD,
PENDING_PUT_RECENT_PERIOD, MAX_PENDING_PUT_DELAY);
public PutFromLoadValidator(AdvancedCache cache) {
this(cache, NAKED_PUT_INVALIDATION_PERIOD);
}

/**
* Constructor variant for use by unit tests; allows control of various timeouts by the test.
*/
protected PutFromLoadValidator(TransactionManager transactionManager,
long nakedPutInvalidationPeriod, long pendingPutOveragePeriod,
long pendingPutRecentPeriod, long maxPendingPutDelay) {
this.transactionManager = transactionManager;
public PutFromLoadValidator(AdvancedCache cache,
long nakedPutInvalidationPeriod) {
this(cache.getCacheManager(), cache.getTransactionManager(),
nakedPutInvalidationPeriod);
}

public PutFromLoadValidator(EmbeddedCacheManager cacheManager,
TransactionManager tm, long nakedPutInvalidationPeriod) {
this.pendingPuts = cacheManager
.getCache(InfinispanRegionFactory.PENDING_PUTS_CACHE_NAME);
this.transactionManager = tm;
this.nakedPutInvalidationPeriod = nakedPutInvalidationPeriod;
this.pendingPutOveragePeriod = pendingPutOveragePeriod;
this.pendingPutRecentPeriod = pendingPutRecentPeriod;
this.maxPendingPutDelay = maxPendingPutDelay;
}

// ----------------------------------------------------------------- Public
@@ -191,10 +169,6 @@ public class PutFromLoadValidator {
boolean locked = false;
long now = System.currentTimeMillis();

// Important: Do cleanup before we acquire any locks so we
// don't deadlock with invalidateRegion
cleanOutdatedPendingPuts(now, true);

try {
PendingPutMap pending = pendingPuts.get(key);
if (pending != null) {
@@ -233,9 +207,6 @@ public class PutFromLoadValidator {
}
}
catch (Throwable t) {

valid = false;

if (locked) {
PendingPutMap toRelease = pendingPuts.get(key);
if (toRelease != null) {
@@ -283,7 +254,6 @@ public class PutFromLoadValidator {
* caller should treat as an exception condition)
*/
public boolean invalidateKey(Object key) {

boolean success = true;

// Invalidate any pending puts
@@ -330,7 +300,7 @@ public class PutFromLoadValidator {
Long cleaned = recentRemovals.get(toClean.key);
if (cleaned != null && cleaned.equals(toClean.timestamp)) {
cleaned = recentRemovals.remove(toClean.key);
if (cleaned != null && cleaned.equals(toClean.timestamp) == false) {
if (cleaned != null && !cleaned.equals(toClean.timestamp)) {
// Oops; removed the wrong timestamp; restore it
recentRemovals.putIfAbsent(toClean.key, cleaned);
}
@@ -405,13 +375,14 @@ public class PutFromLoadValidator {
* @param key key that will be used for subsequent cache put
*/
public void registerPendingPut(Object key) {
PendingPut pendingPut = new PendingPut(key, getOwnerForPut());
PendingPut pendingPut = new PendingPut(getOwnerForPut());
PendingPutMap pendingForKey = new PendingPutMap(pendingPut);

for (;;) {
PendingPutMap existing = pendingPuts.putIfAbsent(key, pendingForKey);
if (existing != null) {
if (existing.acquireLock(10, TimeUnit.SECONDS)) {

try {
existing.put(pendingPut);
PendingPutMap doublecheck = pendingPuts.putIfAbsent(key, existing);
@@ -432,33 +403,10 @@ public class PutFromLoadValidator {
break;
}
}

// Guard against memory leaks
preventOutdatedPendingPuts(pendingPut);
}

// -------------------------------------------------------------- Protected

/** Only for use by unit tests; may be removed at any time */
protected int getPendingPutQueueLength() {
pendingLock.lock();
try {
return pendingQueue.size();
} finally {
pendingLock.unlock();
}
}

/** Only for use by unit tests; may be removed at any time */
protected int getOveragePendingPutQueueLength() {
pendingLock.lock();
try {
return overagePendingQueue.size();
} finally {
pendingLock.unlock();
}
}

/** Only for use by unit tests; may be removed at any time */
protected int getRemovalQueueLength() {
removalsLock.lock();
@@ -484,119 +432,6 @@ public class PutFromLoadValidator {

}

private void preventOutdatedPendingPuts(PendingPut pendingPut) {
pendingLock.lock();
try {
pendingQueue.add(new WeakReference<PendingPut>(pendingPut));
if (pendingQueue.size() > 1) {
cleanOutdatedPendingPuts(pendingPut.timestamp, false);
}
} finally {
pendingLock.unlock();
}
}

private void cleanOutdatedPendingPuts(long now, boolean lock) {

PendingPut toClean = null;
if (lock) {
pendingLock.lock();
}
try {
// Clean items out of the basic queue
long overaged = now - this.pendingPutOveragePeriod;
long recent = now - this.pendingPutRecentPeriod;

int pos = 0;
while (pendingQueue.size() > pos) {
WeakReference<PendingPut> ref = pendingQueue.get(pos);
PendingPut item = ref.get();
if (item == null || item.completed) {
pendingQueue.remove(pos);
} else if (item.timestamp < overaged) {
// Potential leak; move to the overaged queued
pendingQueue.remove(pos);
overagePendingQueue.add(ref);
} else if (item.timestamp >= recent) {
// Don't waste time on very recent items
break;
} else if (pos > 2) {
// Don't spend too much time getting nowhere
break;
} else {
// Move on to the next item
pos++;
}
}

// Process the overage queue until we find an item to clean
// or an incomplete item that hasn't aged out
long mustCleanTime = now - this.maxPendingPutDelay;

while (overagePendingQueue.size() > 0) {
WeakReference<PendingPut> ref = overagePendingQueue.get(0);
PendingPut item = ref.get();
if (item == null || item.completed) {
overagePendingQueue.remove(0);
} else {
if (item.timestamp < mustCleanTime) {
overagePendingQueue.remove(0);
toClean = item;
}
break;
}
}
} finally {
if (lock) {
pendingLock.unlock();
}
}

// We've found a pendingPut that never happened; clean it up
if (toClean != null) {
PendingPutMap map = pendingPuts.get(toClean.key);
if (map != null) {
if (map.acquireLock(100, TimeUnit.MILLISECONDS)) {
try {
PendingPut cleaned = map.remove(toClean.owner);
if (toClean.equals(cleaned) == false) {
if (cleaned != null) {
// Oops. Restore it.
map.put(cleaned);
}
} else if (map.size() == 0) {
pendingPuts.remove(toClean.key, map);
}
}
finally {
map.releaseLock();
}
} else {
// Something's gone wrong and the lock isn't being released.
// We removed toClean from the queue and need to restore it
// TODO this is pretty dodgy
restorePendingPut(toClean);
}
}
}

}

private void restorePendingPut(PendingPut toRestore) {
pendingLock.lock();
try {
// Give it a new lease on life so it's not out of order. We could
// scan the queue and put toRestore back at the front, but then
// we'll just immediately try removing it again; instead we
// let it cycle through the queue again
toRestore.refresh();
pendingQueue.add(new WeakReference<PendingPut>(toRestore));
}
finally {
pendingLock.unlock();
}
}

/**
* Lazy-initialization map for PendingPut. Optimized for the expected usual case where only a
* single put is pending for a given key.
@@ -677,19 +512,12 @@ public class PutFromLoadValidator {
}

private static class PendingPut {
private final Object key;
private final Object owner;
private long timestamp = System.currentTimeMillis();
private volatile boolean completed;

private PendingPut(Object key, Object owner) {
this.key = key;
private PendingPut(Object owner) {
this.owner = owner;
}

private void refresh() {
timestamp = System.currentTimeMillis();
}
}

private static class RecentRemoval {
|
@@ -25,13 +25,14 @@ package org.hibernate.cache.infinispan.access;

import javax.transaction.Transaction;

import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.impl.BaseRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.FlagAdapter;
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
import org.hibernate.cache.spi.access.SoftLock;
@@ -49,20 +50,26 @@ import org.hibernate.cache.spi.access.SoftLock;
public class TransactionalAccessDelegate {
private static final Log log = LogFactory.getLog(TransactionalAccessDelegate.class);
private static final boolean isTrace = log.isTraceEnabled();
protected final CacheAdapter cacheAdapter;
protected final BaseRegion region;
protected final PutFromLoadValidator putValidator;
private final AdvancedCache cache;
private final BaseRegion region;
private final PutFromLoadValidator putValidator;
private final AdvancedCache writeCache;
private final AdvancedCache putFromLoadCache;

public TransactionalAccessDelegate(BaseRegion region, PutFromLoadValidator validator) {
this.region = region;
this.cacheAdapter = region.getCacheAdapter();
this.cache = region.getCache();
this.putValidator = validator;
this.writeCache = Caches.isInvalidationCache(cache) ?
Caches.ignoreReturnValuesCache(cache, Flag.CACHE_MODE_LOCAL) :
Caches.ignoreReturnValuesCache(cache);
this.putFromLoadCache = Caches.ignoreReturnValuesCache(cache);
}

public Object get(Object key, long txTimestamp) throws CacheException {
if (!region.checkValid())
return null;
Object val = cacheAdapter.get(key);
Object val = cache.get(key);
if (val == null)
putValidator.registerPendingPut(key);
return val;
@@ -84,7 +91,7 @@ public class TransactionalAccessDelegate {
// without https://issues.jboss.org/browse/ISPN-1986, it's impossible to
// know whether the put actually occurred. Knowing this is crucial so
// that Hibernate can expose accurate statistics.
if (minimalPutOverride && cacheAdapter.containsKey(key))
if (minimalPutOverride && cache.containsKey(key))
return false;

if (!putValidator.acquirePutFromLoadLock(key)) {
@@ -93,7 +100,7 @@ public class TransactionalAccessDelegate {
}

try {
cacheAdapter.putForExternalRead(key, value);
putFromLoadCache.putForExternalRead(key, value);
} finally {
putValidator.releasePutFromLoadLock(key);
}
@@ -119,11 +126,7 @@ public class TransactionalAccessDelegate {
if (!region.checkValid())
return false;

if (cacheAdapter.isClusteredInvalidation())
cacheAdapter.withFlags(FlagAdapter.CACHE_MODE_LOCAL).put(key, value);
else
cacheAdapter.put(key, value);

writeCache.put(key, value);
return true;
}

@@ -135,7 +138,7 @@ public class TransactionalAccessDelegate {
// We update whether or not the region is valid. Other nodes
// may have already restored the region so they need to
// be informed of the change.
cacheAdapter.put(key, value);
writeCache.put(key, value);
return true;
}

@@ -151,21 +154,21 @@ public class TransactionalAccessDelegate {
// We update whether or not the region is valid. Other nodes
// may have already restored the region so they need to
// be informed of the change.
cacheAdapter.remove(key);
writeCache.remove(key);
}

public void removeAll() throws CacheException {
if (!putValidator.invalidateRegion()) {
throw new CacheException("Failed to invalidate pending putFromLoad calls for region " + region.getName());
}
cacheAdapter.clear();
cache.clear();
}

public void evict(Object key) throws CacheException {
if (!putValidator.invalidateKey(key)) {
throw new CacheException("Failed to invalidate pending putFromLoad calls for key " + key + " from region " + region.getName());
}
cacheAdapter.remove(key);
}
writeCache.remove(key);
}

public void evictAll() throws CacheException {
@@ -175,9 +178,10 @@ public class TransactionalAccessDelegate {
Transaction tx = region.suspend();
try {
region.invalidateRegion(); // Invalidate the local region and then go remote
cacheAdapter.broadcastEvictAll();
Caches.broadcastEvictAll(cache);
} finally {
region.resume(tx);
}
}

}
@ -1,16 +1,14 @@
|
|||
package org.hibernate.cache.infinispan.collection;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.CacheAdapter;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CollectionRegion;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
/**
|
||||
* @author Chris Bredesen
|
||||
|
@ -19,9 +17,9 @@ import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
|
|||
*/
|
||||
public class CollectionRegionImpl extends BaseTransactionalDataRegion implements CollectionRegion {
|
||||
|
||||
public CollectionRegionImpl(CacheAdapter cacheAdapter, String name, CacheDataDescription metadata,
|
||||
TransactionManager transactionManager, RegionFactory factory) {
|
||||
super(cacheAdapter, name, metadata, transactionManager, factory);
|
||||
public CollectionRegionImpl(AdvancedCache cache, String name,
|
||||
CacheDataDescription metadata, RegionFactory factory) {
|
||||
super(cache, name, metadata, factory);
|
||||
}
|
||||
|
||||
public CollectionRegionAccessStrategy buildAccessStrategy(AccessType accessType) throws CacheException {
|
||||
|
@ -33,6 +31,7 @@ public class CollectionRegionImpl extends BaseTransactionalDataRegion implements
|
|||
}
|
||||
|
||||
public PutFromLoadValidator getPutFromLoadValidator() {
|
||||
return new PutFromLoadValidator(transactionManager);
|
||||
return new PutFromLoadValidator(cache);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -1,16 +1,14 @@
package org.hibernate.cache.infinispan.entity;

import javax.transaction.TransactionManager;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.EntityRegion;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
import org.infinispan.AdvancedCache;

/**
* @author Chris Bredesen
@@ -19,9 +17,9 @@ import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
*/
public class EntityRegionImpl extends BaseTransactionalDataRegion implements EntityRegion {

public EntityRegionImpl(CacheAdapter cacheAdapter, String name, CacheDataDescription metadata,
TransactionManager transactionManager, RegionFactory factory) {
super(cacheAdapter, name, metadata, transactionManager, factory);
public EntityRegionImpl(AdvancedCache cache, String name,
CacheDataDescription metadata, RegionFactory factory) {
super(cache, name, metadata, factory);
}

public EntityRegionAccessStrategy buildAccessStrategy(AccessType accessType) throws CacheException {
@@ -34,6 +32,7 @@ public class EntityRegionImpl extends BaseTransactionalDataRegion implements Ent
}

public PutFromLoadValidator getPutFromLoadValidator() {
return new PutFromLoadValidator(transactionManager);
return new PutFromLoadValidator(cache);
}

}
|
@@ -3,9 +3,10 @@ package org.hibernate.cache.infinispan.impl;
import javax.transaction.TransactionManager;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.Caches;
import org.hibernate.cache.spi.GeneralDataRegion;
import org.hibernate.cache.spi.RegionFactory;
import org.infinispan.AdvancedCache;

/**
* Support for Infinispan {@link GeneralDataRegion} implementors.
@@ -16,24 +17,28 @@ import org.hibernate.cache.spi.RegionFactory;
*/
public abstract class BaseGeneralDataRegion extends BaseRegion implements GeneralDataRegion {

public BaseGeneralDataRegion(CacheAdapter cacheAdapter, String name, TransactionManager transactionManager, RegionFactory factory) {
super(cacheAdapter, name, transactionManager, factory);
private final AdvancedCache putCache;

public BaseGeneralDataRegion(AdvancedCache cache, String name,
RegionFactory factory) {
super(cache, name, factory);
this.putCache = Caches.ignoreReturnValuesCache(cache);
}

public void evict(Object key) throws CacheException {
cacheAdapter.evict(key);
cache.evict(key);
}

public void evictAll() throws CacheException {
cacheAdapter.clear();
cache.clear();
}

public Object get(Object key) throws CacheException {
return cacheAdapter.get(key);
return cache.get(key);
}

public void put(Object key, Object value) throws CacheException {
cacheAdapter.put(key, value);
putCache.put(key, value);
}

}
|
@@ -8,13 +8,13 @@ import javax.transaction.SystemException;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;

import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.util.AddressAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.FlagAdapter;
import org.hibernate.cache.spi.Region;
import org.hibernate.cache.spi.RegionFactory;

@@ -29,37 +29,38 @@ import org.hibernate.cache.spi.RegionFactory;
*/
public abstract class BaseRegion implements Region {

private enum InvalidateState { INVALID, CLEARING, VALID };
private static final Log log = LogFactory.getLog(BaseRegion.class);

private enum InvalidateState {
INVALID, CLEARING, VALID
}

private final String name;
protected final CacheAdapter cacheAdapter;
protected final AddressAdapter address;
protected final TransactionManager transactionManager;
protected final boolean replication;
protected final Object invalidationMutex = new Object();
protected final AtomicReference<InvalidateState> invalidateState = new AtomicReference<InvalidateState>(InvalidateState.VALID);
private final AdvancedCache regionClearCache;
private final TransactionManager tm;
private final Object invalidationMutex = new Object();
private final AtomicReference<InvalidateState> invalidateState =
new AtomicReference<InvalidateState>(InvalidateState.VALID);
private final RegionFactory factory;

public BaseRegion(CacheAdapter cacheAdapter, String name, TransactionManager transactionManager, RegionFactory factory) {
this.cacheAdapter = cacheAdapter;
protected final AdvancedCache cache;

public BaseRegion(AdvancedCache cache, String name, RegionFactory factory) {
this.cache = cache;
this.name = name;
this.transactionManager = transactionManager;
this.replication = cacheAdapter.isClusteredReplication();
this.address = this.cacheAdapter.getAddress();
this.tm = cache.getTransactionManager();
this.factory = factory;
this.regionClearCache = cache.withFlags(
Flag.CACHE_MODE_LOCAL, Flag.ZERO_LOCK_ACQUISITION_TIMEOUT);
}

public String getName() {
return name;
}

public CacheAdapter getCacheAdapter() {
return cacheAdapter;
}

public long getElementCountInMemory() {
if (checkValid())
return cacheAdapter.size();
return cache.size();

return 0;
}
@@ -92,51 +93,46 @@ public abstract class BaseRegion implements Region {

public Map toMap() {
if (checkValid())
return cacheAdapter.toMap();
return cache;

return Collections.EMPTY_MAP;
}

public void destroy() throws CacheException {
try {
cacheAdapter.stop();
cache.stop();
} finally {
cacheAdapter.removeListener(this);
cache.removeListener(this);
}
}

public boolean contains(Object key) {
if (!checkValid())
return false;
// Reads are non-blocking in Infinispan, so not sure of the necessity of passing ZERO_LOCK_ACQUISITION_TIMEOUT
return cacheAdapter.withFlags(FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT).containsKey(key);
}

public AddressAdapter getAddress() {
return address;
return checkValid() && cache.containsKey(key);
}

public boolean checkValid() {
boolean valid = isValid();
if (!valid) {
synchronized (invalidationMutex) {
if (invalidateState.compareAndSet(InvalidateState.INVALID, InvalidateState.CLEARING)) {
if (invalidateState.compareAndSet(
InvalidateState.INVALID, InvalidateState.CLEARING)) {
Transaction tx = suspend();
try {
// Clear region in a separate transaction
cacheAdapter.withinTx(new Callable<Void>() {
Caches.withinTx(cache, new Callable<Void>() {
@Override
public Void call() throws Exception {
cacheAdapter.withFlags(FlagAdapter.CACHE_MODE_LOCAL,
FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT).clear();
regionClearCache.clear();
return null;
}
});
invalidateState.compareAndSet(InvalidateState.CLEARING, InvalidateState.VALID);
invalidateState.compareAndSet(
InvalidateState.CLEARING, InvalidateState.VALID);
}
catch (Exception e) {
if (log.isTraceEnabled()) {
log.trace("Could not invalidate region: " + e.getLocalizedMessage());
log.trace("Could not invalidate region: "
+ e.getLocalizedMessage());
}
}
finally {
@@ -150,44 +146,10 @@ public abstract class BaseRegion implements Region {
return valid;
}

protected boolean isValid() {
return invalidateState.get() == InvalidateState.VALID;
}

/**
* Performs a Infinispan <code>get(Fqn, Object)</code>
*
* @param key The key of the item to get
* @param suppressTimeout should any TimeoutException be suppressed?
* @param flagAdapters flags to add to the get invocation
* @return The retrieved object
* @throws CacheException issue managing transaction or talking to cache
*/
protected Object get(Object key, boolean suppressTimeout, FlagAdapter... flagAdapters) throws CacheException {
CacheAdapter localCacheAdapter = cacheAdapter;
if (flagAdapters != null && flagAdapters.length > 0)
localCacheAdapter = cacheAdapter.withFlags(flagAdapters);

if (suppressTimeout)
return localCacheAdapter.getAllowingTimeout(key);
else
return localCacheAdapter.get(key);
}

public Object getOwnerForPut() {
Transaction tx = null;
try {
if (transactionManager != null) {
tx = transactionManager.getTransaction();
}
} catch (SystemException se) {
throw new CacheException("Could not obtain transaction", se);
}
return tx == null ? Thread.currentThread() : tx;
}

/**
* Tell the TransactionManager to suspend any ongoing transaction.
*
@@ -197,8 +159,8 @@ public abstract class BaseRegion implements Region {
public Transaction suspend() {
Transaction tx = null;
try {
if (transactionManager != null) {
tx = transactionManager.suspend();
if (tm != null) {
tx = tm.suspend();
}
} catch (SystemException se) {
throw new CacheException("Could not suspend transaction", se);
@@ -215,7 +177,7 @@ public abstract class BaseRegion implements Region {
public void resume(Transaction tx) {
try {
if (tx != null)
transactionManager.resume(tx);
tm.resume(tx);
} catch (Exception e) {
throw new CacheException("Could not resume transaction", e);
}
@@ -227,7 +189,17 @@ public abstract class BaseRegion implements Region {
}

public TransactionManager getTransactionManager() {
return transactionManager;
return tm;
}

// Used to satisfy TransactionalDataRegion.isTransactionAware in subclasses
@SuppressWarnings("unused")
public boolean isTransactionAware() {
return tm != null;
}

public AdvancedCache getCache() {
return cache;
}

}
@ -1,11 +1,9 @@
|
|||
package org.hibernate.cache.infinispan.impl;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.CacheAdapter;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.TransactionalDataRegion;
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
/**
|
||||
* Support for Inifinispan {@link org.hibernate.cache.spi.TransactionalDataRegion} implementors.
|
||||
|
@ -19,10 +17,9 @@ public abstract class BaseTransactionalDataRegion
|
|||
|
||||
private final CacheDataDescription metadata;
|
||||
|
||||
public BaseTransactionalDataRegion(CacheAdapter cacheAdapter, String name,
|
||||
CacheDataDescription metadata, TransactionManager transactionManager,
|
||||
RegionFactory factory) {
|
||||
super(cacheAdapter, name, transactionManager, factory);
|
||||
public BaseTransactionalDataRegion(AdvancedCache cache, String name,
|
||||
CacheDataDescription metadata, RegionFactory factory) {
|
||||
super(cache, name, factory);
|
||||
this.metadata = metadata;
|
||||
}
|
||||
|
||||
|
@ -30,8 +27,4 @@ public abstract class BaseTransactionalDataRegion
|
|||
return metadata;
|
||||
}
|
||||
|
||||
public boolean isTransactionAware() {
|
||||
return transactionManager != null;
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,25 +1,27 @@
package org.hibernate.cache.infinispan.naturalid;

import javax.transaction.TransactionManager;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.NaturalIdRegion;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.NaturalIdRegionAccessStrategy;
import org.infinispan.AdvancedCache;

/**
* Natural ID cache region
*
* @author Strong Liu <stliu@hibernate.org>
* @author Galder Zamarreño
*/
public class NaturalIdRegionImpl extends BaseTransactionalDataRegion implements NaturalIdRegion {
public NaturalIdRegionImpl(CacheAdapter cacheAdapter,
String name, CacheDataDescription metadata,
TransactionManager transactionManager, RegionFactory factory) {
super( cacheAdapter, name, metadata, transactionManager, factory );
public class NaturalIdRegionImpl extends BaseTransactionalDataRegion
implements NaturalIdRegion {

public NaturalIdRegionImpl(AdvancedCache cache, String name,
CacheDataDescription metadata, RegionFactory factory) {
super(cache, name, metadata, factory);
}

@Override
@@ -33,6 +35,7 @@ public class NaturalIdRegionImpl extends BaseTransactionalDataRegion implements
}

public PutFromLoadValidator getPutFromLoadValidator() {
return new PutFromLoadValidator(transactionManager);
return new PutFromLoadValidator(cache);
}

}
@ -1,15 +1,14 @@
|
|||
package org.hibernate.cache.infinispan.query;
|
||||
|
||||
import java.util.Properties;
|
||||
import javax.transaction.Transaction;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.CacheAdapter;
|
||||
import org.hibernate.cache.infinispan.util.FlagAdapter;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.spi.QueryResultsRegion;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* @author Chris Bredesen
|
||||
|
@ -17,27 +16,35 @@ import org.hibernate.cache.spi.RegionFactory;
|
|||
* @since 3.5
|
||||
*/
|
||||
public class QueryResultsRegionImpl extends BaseTransactionalDataRegion implements QueryResultsRegion {
|
||||
private boolean localOnly;
|
||||
|
||||
public QueryResultsRegionImpl(CacheAdapter cacheAdapter, String name, Properties properties, TransactionManager transactionManager, RegionFactory factory) {
|
||||
super(cacheAdapter, name, null, transactionManager, factory);
|
||||
private final AdvancedCache evictCache;
|
||||
private final AdvancedCache putCache;
|
||||
private final AdvancedCache getCache;
|
||||
|
||||
public QueryResultsRegionImpl(AdvancedCache cache, String name, RegionFactory factory) {
|
||||
super(cache, name, null, factory);
|
||||
// If Infinispan is using INVALIDATION for query cache, we don't want to propagate changes.
|
||||
// We use the Timestamps cache to manage invalidation
|
||||
localOnly = cacheAdapter.isClusteredInvalidation();
|
||||
boolean localOnly = Caches.isInvalidationCache(cache);
|
||||
|
||||
this.evictCache = localOnly ? Caches.localCache(cache) : cache;
|
||||
|
||||
this.putCache = localOnly ?
|
||||
Caches.failSilentWriteCache(cache, Flag.CACHE_MODE_LOCAL) :
|
||||
Caches.failSilentWriteCache(cache);
|
||||
|
||||
this.getCache = Caches.failSilentReadCache(cache);
|
||||
}
|
||||
|
||||
public void evict(Object key) throws CacheException {
|
||||
if (localOnly)
|
||||
cacheAdapter.withFlags(FlagAdapter.CACHE_MODE_LOCAL).remove(key);
|
||||
else
|
||||
cacheAdapter.remove(key);
|
||||
evictCache.remove(key);
|
||||
}
|
||||
|
||||
public void evictAll() throws CacheException {
|
||||
Transaction tx = suspend();
|
||||
try {
|
||||
invalidateRegion(); // Invalidate the local region and then go remote
|
||||
cacheAdapter.broadcastEvictAll();
|
||||
Caches.broadcastEvictAll(cache);
|
||||
} finally {
|
||||
resume(tx);
|
||||
}
|
||||
|
@ -60,9 +67,9 @@ public class QueryResultsRegionImpl extends BaseTransactionalDataRegion implemen
|
|||
// Add a zero (or low) timeout option so we don't block
|
||||
// waiting for tx's that did a put to commit
|
||||
if (skipCacheStore)
|
||||
return get(key, true, FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT, FlagAdapter.SKIP_CACHE_STORE);
|
||||
return getCache.withFlags(Flag.SKIP_CACHE_STORE).get(key);
|
||||
else
|
||||
return get(key, true, FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT);
|
||||
return getCache.get(key);
|
||||
}
|
||||
|
||||
public void put(Object key, Object value) throws CacheException {
|
||||
|
@ -82,12 +89,8 @@ public class QueryResultsRegionImpl extends BaseTransactionalDataRegion implemen
|
|||
// any subsequent read will just see the old result with its
|
||||
// out-of-date timestamp; that result will be discarded and the
|
||||
// db query performed again.
|
||||
if (localOnly)
|
||||
cacheAdapter.withFlags(FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT, FlagAdapter.CACHE_MODE_LOCAL)
|
||||
.putAllowingTimeout(key, value);
|
||||
else
|
||||
cacheAdapter.withFlags(FlagAdapter.ZERO_LOCK_ACQUISITION_TIMEOUT)
|
||||
.putAllowingTimeout(key, value);
|
||||
putCache.put(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,152 @@
/*
* JBoss, Home of Professional Open Source
* Copyright 2012 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/

package org.hibernate.cache.infinispan.timestamp;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.util.Caches;
import org.hibernate.cache.spi.RegionFactory;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;

import javax.transaction.Transaction;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
* Timestamp cache region for clustered environments.
*
* @author Galder Zamarreño
* @since 4.1
*/
@Listener
public class ClusteredTimestampsRegionImpl extends TimestampsRegionImpl {

/**
* Maintains a local (authoritative) cache of timestamps along with the
* replicated cache held in Infinispan. It listens for changes in the
* cache and updates the local cache accordingly. This approach allows
* timestamp changes to be replicated asynchronously.
*/
private final Map localCache = new ConcurrentHashMap();

public ClusteredTimestampsRegionImpl(AdvancedCache cache,
String name, RegionFactory factory) {
super(cache, name, factory);
cache.addListener(this);
populateLocalCache();
}

@Override
protected AdvancedCache getTimestampsPutCache(AdvancedCache cache) {
return Caches.asyncWriteCache(cache, Flag.SKIP_LOCKING);
}

@Override
public Object get(Object key) throws CacheException {
Object value = localCache.get(key);

// If the region is not valid, skip cache store to avoid going remote to retrieve the query.
// The aim of this is to maintain same logic/semantics as when state transfer was configured.
// TODO: Once https://issues.jboss.org/browse/ISPN-835 has been resolved, revert to state transfer and remove workaround
boolean skipCacheStore = false;
if (!isValid())
skipCacheStore = true;

if (value == null && checkValid()) {
if (skipCacheStore)
value = cache.withFlags(Flag.SKIP_CACHE_STORE).get(key);
else
value = cache.get(key);

if (value != null)
localCache.put(key, value);
}
return value;
}

@Override
public void evictAll() throws CacheException {
// TODO Is this a valid operation on a timestamps cache?
Transaction tx = suspend();
try {
invalidateRegion(); // Invalidate the local region and then go remote
Caches.broadcastEvictAll(cache);
} finally {
resume(tx);
}
}

@Override
public void invalidateRegion() {
super.invalidateRegion(); // Invalidate first
localCache.clear();
}

@Override
public void destroy() throws CacheException {
localCache.clear();
cache.removeListener(this);
super.destroy();
}

/**
* Brings all data from the distributed cache into our local cache.
*/
private void populateLocalCache() {
Set children = cache.keySet();
for (Object key : children)
get(key);
}

/**
* Monitors cache events and updates the local cache
*
* @param event
*/
@CacheEntryModified
@SuppressWarnings("unused")
public void nodeModified(CacheEntryModifiedEvent event) {
if (!event.isPre())
localCache.put(event.getKey(), event.getValue());
}

/**
* Monitors cache events and updates the local cache
*
* @param event
*/
@CacheEntryRemoved
@SuppressWarnings("unused")
public void nodeRemoved(CacheEntryRemovedEvent event) {
if (event.isPre()) return;
localCache.remove(event.getKey());
}

}
|
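The new region above leans on Infinispan's listener API to keep an authoritative local map in sync with the replicated cache, which is what lets timestamp replication go asynchronous. The same pattern in isolation, stripped of the region plumbing (class and variable names here are illustrative only, not part of the commit):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.infinispan.Cache;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;

// Illustrative mirror: reads are served from the ConcurrentHashMap, while the
// listener keeps it up to date with whatever lands in the Infinispan cache,
// including entries replicated from other nodes.
@Listener
public class LocalMirror {

   private final Map localCache = new ConcurrentHashMap();

   public LocalMirror(Cache cache) {
      cache.addListener(this);
   }

   public Object read(Object key) {
      return localCache.get(key); // never goes remote
   }

   @CacheEntryModified
   public void entryModified(CacheEntryModifiedEvent event) {
      if (!event.isPre()) // only react once the write has been applied
         localCache.put(event.getKey(), event.getValue());
   }

   @CacheEntryRemoved
   public void entryRemoved(CacheEntryRemovedEvent event) {
      if (!event.isPre())
         localCache.remove(event.getKey());
   }
}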
@@ -4,8 +4,10 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;

import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;

@@ -14,8 +16,6 @@ import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.impl.BaseGeneralDataRegion;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.FlagAdapter;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.TimestampsRegion;


@@ -26,110 +26,64 @@ import org.hibernate.cache.spi.TimestampsRegion;
 * @author Galder Zamarreño
 * @since 3.5
 */
@Listener
public class TimestampsRegionImpl extends BaseGeneralDataRegion implements TimestampsRegion {

   private Map localCache = new ConcurrentHashMap();
   private final AdvancedCache removeCache;
   private final AdvancedCache timestampsPutCache;

   public TimestampsRegionImpl(CacheAdapter cacheAdapter, String name, TransactionManager transactionManager, RegionFactory factory) {
      super(cacheAdapter, name, transactionManager, factory);
      cacheAdapter.addListener(this);
      populateLocalCache();
   public TimestampsRegionImpl(AdvancedCache cache, String name,
         RegionFactory factory) {
      super(cache, name, factory);
      this.removeCache = Caches.ignoreReturnValuesCache(cache);

      // Skip locking when updating timestamps to provide better performance
      // under highly concurrent insert scenarios, where update timestamps
      // for an entity/collection type are constantly updated, creating
      // contention.
      //
      // The worst that can happen is that an earlier timestamp
      // (i.e. ts=1) will override a later one (i.e. ts=2), so it means that
      // in highly concurrent environments, queries might be considered stale
      // earlier in time. The upside is that inserts/updates are way faster
      // in local set ups.
      this.timestampsPutCache = getTimestampsPutCache(cache);
   }

   protected AdvancedCache getTimestampsPutCache(AdvancedCache cache) {
      return Caches.ignoreReturnValuesCache(cache, Flag.SKIP_LOCKING);
   }

   @Override
   public void evict(Object key) throws CacheException {
      // TODO Is this a valid operation on a timestamps cache?
      cacheAdapter.remove(key);
      removeCache.remove(key);
   }

   public void evictAll() throws CacheException {
      // TODO Is this a valid operation on a timestamps cache?
      Transaction tx = suspend();
      try {
         invalidateRegion(); // Invalidate the local region and then go remote
         cacheAdapter.broadcastEvictAll();
         invalidateRegion(); // Invalidate the local region
      } finally {
         resume(tx);
      }
   }

   public Object get(Object key) throws CacheException {
      Object value = localCache.get(key);
      if (checkValid())
         return cache.get(key);

      // If the region is not valid, skip cache store to avoid going remote to retrieve the query.
      // The aim of this is to maintain same logic/semantics as when state transfer was configured.
      // TODO: Once https://issues.jboss.org/browse/ISPN-835 has been resolved, revert to state transfer and remove workaround
      boolean skipCacheStore = false;
      if (!isValid())
         skipCacheStore = true;

      if (value == null && checkValid()) {
         if (skipCacheStore)
            value = get(key, false, FlagAdapter.SKIP_CACHE_STORE);
         else
            value = get(key, false);

         if (value != null)
            localCache.put(key, value);
      }
      return value;
      return null;
   }

   public void put(final Object key, final Object value) throws CacheException {
      try {
         // We ensure ASYNC semantics (JBCACHE-1175) and make sure previous
         // value is not loaded from cache store cos it's not needed.
         cacheAdapter.withFlags(FlagAdapter.FORCE_ASYNCHRONOUS).put(key, value);
         timestampsPutCache.put(key, value);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   @Override
   public void destroy() throws CacheException {
      localCache.clear();
      cacheAdapter.removeListener(this);
      super.destroy();
   }

   /**
    * Monitors cache events and updates the local cache
    *
    * @param event
    */
   @CacheEntryModified
   @SuppressWarnings("unused")
   public void nodeModified(CacheEntryModifiedEvent event) {
      if (!event.isPre())
         localCache.put(event.getKey(), event.getValue());
   }

   /**
    * Monitors cache events and updates the local cache
    *
    * @param event
    */
   @CacheEntryRemoved
   @SuppressWarnings("unused")
   public void nodeRemoved(CacheEntryRemovedEvent event) {
      if (event.isPre()) return;
      localCache.remove(event.getKey());
   }

   @Override
   public void invalidateRegion() {
      super.invalidateRegion(); // Invalidate first
      localCache.clear();
   }

   /**
    * Brings all data from the distributed cache into our local cache.
    */
   private void populateLocalCache() {
      Set children = cacheAdapter.keySet();
      for (Object key : children)
         get(key);
   }

}
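The comment in the constructor above captures the trade-off: with SKIP_LOCKING, timestamp writes become last-write-wins, so an update carrying an older timestamp can overwrite a newer one and a cached query may be treated as stale slightly earlier than strictly necessary. A small self-contained illustration of that effect (plain Java, no Infinispan involved):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Models two unsynchronized timestamp updates racing for the same key:
// the write carrying the older timestamp may land last and win.
public class SkipLockingTradeOff {
   public static void main(String[] args) {
      ConcurrentMap<String, Long> updateTimestamps = new ConcurrentHashMap<String, Long>();

      long older = 1L; // computed first
      long newer = 2L; // computed second

      updateTimestamps.put("com.acme.Person", newer); // the newer update happens to land first
      updateTimestamps.put("com.acme.Person", older); // the older one lands last and wins

      // A query result cached between 'older' and 'newer' now looks stale, so at
      // worst it is discarded and the query re-runs against the database.
      System.out.println("recorded timestamp = " + updateTimestamps.get("com.acme.Person"));
   }
}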
@@ -1,33 +0,0 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
 * individual contributors as indicated by the @author tags. See the
 * copyright.txt file in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.hibernate.cache.infinispan.util;


/**
 * AddressAdapter.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public interface AddressAdapter {
}
@@ -1,88 +0,0 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
 * individual contributors as indicated by the @author tags. See the
 * copyright.txt file in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.hibernate.cache.infinispan.util;

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.List;

import org.infinispan.remoting.transport.Address;

/**
 * AddressAdapterImpl.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class AddressAdapterImpl implements AddressAdapter, Externalizable {

   private Address address;

   // Required by Java Externalizable
   public AddressAdapterImpl() {
   }

   public AddressAdapterImpl(Address address) {
      this.address = address;
   }

   static AddressAdapter newInstance(Address address) {
      return new AddressAdapterImpl(address);
   }

   public static List<AddressAdapter> toAddressAdapter(List<Address> ispnAddresses) {
      List<AddressAdapter> addresses = new ArrayList<AddressAdapter>(ispnAddresses.size());
      for (Address address : ispnAddresses) {
         addresses.add(AddressAdapterImpl.newInstance(address));
      }
      return addresses;
   }

   public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
      address = (Address) in.readObject();
   }

   public void writeExternal(ObjectOutput out) throws IOException {
      out.writeObject(address);
   }

   @Override
   public boolean equals(Object obj) {
      if (obj == this)
         return true;
      if (!(obj instanceof AddressAdapterImpl))
         return false;
      AddressAdapterImpl other = (AddressAdapterImpl) obj;
      return other.address.equals(address);
   }

   @Override
   public int hashCode() {
      int result = 17;
      result = 31 * result + address.hashCode();
      return result;
   }
}
@@ -1,228 +0,0 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
 * individual contributors as indicated by the @author tags. See the
 * copyright.txt file in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.hibernate.cache.infinispan.util;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;

import org.infinispan.Cache;
import org.infinispan.config.Configuration;
import org.infinispan.util.concurrent.TimeoutException;

import org.hibernate.cache.CacheException;

/**
 * Infinispan cache abstraction.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public interface CacheAdapter {

   /**
    * Is this cache participating in a cluster with invalidation?
    *
    * @return true if the cache is configured for synchronous/asynchronous invalidation; false otherwise.
    */
   boolean isClusteredInvalidation();

   /**
    * Is this cache participating in a cluster with replication?
    *
    * @return true if the cache is configured for synchronous/asynchronous invalidation; false otherwise.
    */
   boolean isClusteredReplication();

   /**
    * Is this cache configured for synchronous communication?
    *
    * @return true if the cache is configured for synchronous communication; false otherwise.
    */
   boolean isSynchronous();

   /**
    * Set of keys of this cache.
    *
    * @return Set containing keys stored in this cache.
    */
   Set keySet();

   /**
    * A builder-style method that adds flags to any cache API call.
    *
    * @param flagAdapters a set of flags to apply. See the {@link FlagAdapter} documentation.
    * @return a cache on which a real operation is to be invoked.
    */
   CacheAdapter withFlags(FlagAdapter... flagAdapters);

   /**
    * Method to check whether a certain key exists in this cache.
    *
    * @param key key to look up.
    * @return true if key is present, false otherwise.
    */
   boolean containsKey(Object key);

   /**
    * Performs an <code>get(Object)</code> on the cache, wrapping any exception in a {@link CacheException}.
    *
    * @param key key to retrieve
    * @throws CacheException
    */
   Object get(Object key) throws CacheException;

   /**
    * Performs an <code>get(Object)</code> on the cache ignoring any {@link TimeoutException}
    * and wrapping any other exception in a {@link CacheException}.
    *
    * @param key key to retrieve
    * @throws CacheException
    */
   Object getAllowingTimeout(Object key) throws CacheException;

   /**
    * Performs a <code>put(Object, Object)</code> on the cache,
    * wrapping any exception in a {@link CacheException}.
    *
    * @param key key whose value will be modified
    * @param value data to store in the cache entry
    * @throws CacheException
    */
   void put(Object key, Object value) throws CacheException;

   /**
    * Performs a <code>put(Object, Object)</code> on the cache ignoring
    * any {@link TimeoutException} and wrapping any exception in a
    * {@link CacheException}.
    *
    * @param key key whose value will be modified
    * @param value data to store in the cache entry
    * @throws CacheException
    */
   void putAllowingTimeout(Object key, Object value) throws CacheException;

   /**
    * See {@link Cache#putForExternalRead(Object, Object)} for detailed documentation.
    *
    * @param key key with which the specified value is to be associated.
    * @param value value to be associated with the specified key.
    * @throws CacheException
    */
   void putForExternalRead(Object key, Object value) throws CacheException;

   /**
    * Performs a <code>remove(Object)</code>, wrapping any exception in
    * a {@link CacheException}.
    *
    * @param key key to be removed
    * @throws CacheException
    */
   void remove(Object key) throws CacheException;

   /**
    * Evict the given key from memory.
    *
    * @param key to evict.
    */
   void evict(Object key) throws CacheException;

   /**
    * Clear the cache.
    *
    * @throws CacheException
    */
   void clear() throws CacheException;

   /**
    * Stops the cache.
    */
   void stop();

   /**
    * Add listener to this cache.
    *
    * @param listener to be added to cache.
    */
   void addListener(Object listener);

   /**
    * Get local cluster address.
    *
    * @return Address representing local address.
    */
   AddressAdapter getAddress();

   /**
    * Get cluster members.
    *
    * @return List of cluster member Address instances
    */
   List<AddressAdapter> getMembers();

   /**
    * Size of cache.
    *
    * @return number of cache entries.
    */
   int size();

   /**
    * This method returns a Map view of the cache.
    *
    * @return Map view of cache.
    */
   Map toMap();

   /**
    * Remove listener from cache instance.
    *
    * @param listener to be removed.
    */
   void removeListener(Object listener);

   /**
    * Get cache configuration.
    *
    * @return Configuration instance associated with this cache.
    */
   Configuration getConfiguration();

   /**
    * TODO
    */
   void broadcastEvictAll();

   /**
    * TODO
    *
    * @param c
    * @param <T>
    * @return
    */
   <T> T withinTx(Callable<T> c) throws Exception;

   Cache getCache();

}
@@ -1,255 +0,0 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
 * individual contributors as indicated by the @author tags. See the
 * copyright.txt file in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.hibernate.cache.infinispan.util;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;

import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.config.Configuration;
import org.infinispan.context.Flag;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

import org.hibernate.cache.CacheException;

/**
 * CacheAdapterImpl.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class CacheAdapterImpl implements CacheAdapter {
   private static final Log log = LogFactory.getLog(CacheAdapterImpl.class);

   private final AdvancedCache cache;
   private final CacheCommandInitializer cacheCmdInitializer;
   private final boolean isSync;

   private CacheAdapterImpl(AdvancedCache cache) {
      this.cache = cache;
      this.cacheCmdInitializer = cache.getComponentRegistry()
            .getComponent(CacheCommandInitializer.class);
      this.isSync = isSynchronous(cache.getConfiguration().getCacheMode());
   }

   public static CacheAdapter newInstance(AdvancedCache cache) {
      return new CacheAdapterImpl(cache);
   }

   public boolean isClusteredInvalidation() {
      return isClusteredInvalidation(cache.getConfiguration().getCacheMode());
   }

   public boolean isClusteredReplication() {
      return isClusteredReplication(cache.getConfiguration().getCacheMode());
   }

   public boolean isSynchronous() {
      return isSync;
   }

   public Set keySet() {
      return cache.keySet();
   }

   public CacheAdapter withFlags(FlagAdapter... flagAdapters) {
      Flag[] flags = FlagAdapter.toFlags(flagAdapters);
      return newInstance(cache.withFlags(flags));
   }

   public Object get(Object key) throws CacheException {
      try {
         return cache.get(key);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public Object getAllowingTimeout(Object key) throws CacheException {
      try {
         return getFailSilentCache().get(key);
      } catch (TimeoutException ignored) {
         // ignore it
         return null;
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void put(Object key, Object value) throws CacheException {
      try {
         // No previous value interest, so apply flags that avoid remote lookups.
         getSkipRemoteGetLoadCache().put(key, value);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void putAllowingTimeout(Object key, Object value) throws CacheException {
      try {
         // No previous value interest, so apply flags that avoid remote lookups.
         getFailSilentCacheSkipRemotes().put(key, value);
      } catch (TimeoutException allowed) {
         // ignore it
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void putForExternalRead(Object key, Object value) throws CacheException {
      try {
         // No previous value interest, so apply flags that avoid remote lookups.
         getFailSilentCacheSkipRemotes().putForExternalRead(key, value);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void remove(Object key) throws CacheException {
      try {
         // No previous value interest, so apply flags that avoid remote lookups.
         getSkipRemoteGetLoadCache().remove(key);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void evict(Object key) throws CacheException {
      try {
         cache.evict(key);
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void clear() throws CacheException {
      try {
         cache.clear();
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }

   public void stop() {
      if (log.isTraceEnabled())
         log.trace("Stop " + cache);
      cache.stop();
   }

   private static boolean isClusteredInvalidation(Configuration.CacheMode cacheMode) {
      return cacheMode == Configuration.CacheMode.INVALIDATION_ASYNC
            || cacheMode == Configuration.CacheMode.INVALIDATION_SYNC;
   }

   private static boolean isClusteredReplication(Configuration.CacheMode cacheMode) {
      return cacheMode == Configuration.CacheMode.REPL_ASYNC
            || cacheMode == Configuration.CacheMode.REPL_SYNC;
   }

   private static boolean isSynchronous(Configuration.CacheMode cacheMode) {
      return cacheMode == Configuration.CacheMode.REPL_SYNC
            || cacheMode == Configuration.CacheMode.INVALIDATION_SYNC
            || cacheMode == Configuration.CacheMode.DIST_SYNC;
   }

   public void addListener(Object listener) {
      cache.addListener(listener);
   }

   public AddressAdapter getAddress() {
      RpcManager rpc = cache.getRpcManager();
      if (rpc != null) {
         return AddressAdapterImpl.newInstance(rpc.getTransport().getAddress());
      }
      return null;
   }

   public List<AddressAdapter> getMembers() {
      RpcManager rpc = cache.getRpcManager();
      if (rpc != null) {
         return AddressAdapterImpl.toAddressAdapter(rpc.getTransport().getMembers());
      }
      return null;
   }

   public int size() {
      return cache.size();
   }

   public Map toMap() {
      return cache;
   }

   public void removeListener(Object listener) {
      cache.removeListener(listener);
   }

   public boolean containsKey(Object key) {
      return cache.containsKey(key);
   }

   public Configuration getConfiguration() {
      return cache.getConfiguration();
   }

   @Override
   public void broadcastEvictAll() {
      RpcManager rpcManager = cache.getRpcManager();
      if (rpcManager != null) {
         // Only broadcast evict all if it's clustered
         EvictAllCommand cmd = cacheCmdInitializer.buildEvictAllCommand(cache.getName());
         rpcManager.broadcastRpcCommand(cmd, isSync);
      }
   }

   @Override
   public <T> T withinTx(Callable<T> c) throws Exception {
      return CacheHelper.withinTx(cache.getTransactionManager(), c);
   }

   @Override
   public Cache getCache() {
      return cache;
   }

   private Cache getFailSilentCache() {
      return cache.withFlags(Flag.FAIL_SILENTLY);
   }

   private Cache getSkipRemoteGetLoadCache() {
      return cache.withFlags(
            Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP);
   }

   private Cache getFailSilentCacheSkipRemotes() {
      return cache.withFlags(
            Flag.FAIL_SILENTLY, Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP);
   }

}
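With the adapter gone, its exception-swallowing write paths are expressed as flag combinations on the raw AdvancedCache, bundled by the static helpers in the Caches class that follows. A sketch of roughly what putAllowingTimeout(key, value) becomes (class and method names here are assumptions):

import org.infinispan.AdvancedCache;
import org.hibernate.cache.infinispan.util.Caches;

class FailSilentPutSketch {
   // FAIL_SILENTLY plus ZERO_LOCK_ACQUISITION_TIMEOUT mean a lock timeout no longer
   // surfaces as an exception that has to be caught and ignored, while
   // SKIP_CACHE_LOAD/SKIP_REMOTE_LOOKUP avoid fetching a previous value nobody reads.
   static void putIgnoringLockTimeouts(AdvancedCache cache, Object key, Object value) {
      Caches.failSilentWriteCache(cache).put(key, value);
   }
}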
hibernate-infinispan/src/main/java/org/hibernate/cache/infinispan/util/Caches.java (new file, 141 lines)
@@ -0,0 +1,141 @@
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other
 * contributors as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.hibernate.cache.infinispan.util;

import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.remoting.rpc.RpcManager;

import javax.transaction.Status;
import javax.transaction.TransactionManager;
import java.util.concurrent.Callable;

/**
 * Helper for dealing with Infinispan cache instances.
 *
 * @author Galder Zamarreño
 * @since 4.1
 */
public class Caches {

   private Caches() {
      // Suppresses default constructor, ensuring non-instantiability.
   }

   public static <T> T withinTx(AdvancedCache cache,
         Callable<T> c) throws Exception {
      // Retrieve transaction manager
      return withinTx(cache.getTransactionManager(), c);
   }

   public static <T> T withinTx(TransactionManager tm,
         Callable<T> c) throws Exception {
      tm.begin();
      try {
         return c.call();
      } catch (Exception e) {
         tm.setRollbackOnly();
         throw e;
      } finally {
         if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
         else tm.rollback();
      }
   }

   public static AdvancedCache localCache(AdvancedCache cache) {
      return cache.withFlags(Flag.CACHE_MODE_LOCAL);
   }

   public static AdvancedCache ignoreReturnValuesCache(AdvancedCache cache) {
      return cache.withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP);
   }

   public static AdvancedCache ignoreReturnValuesCache(
         AdvancedCache cache, Flag extraFlag) {
      return cache.withFlags(
            Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP, extraFlag);
   }

   public static AdvancedCache asyncWriteCache(AdvancedCache cache,
         Flag extraFlag) {
      return cache.withFlags(
            Flag.SKIP_CACHE_LOAD,
            Flag.SKIP_REMOTE_LOOKUP,
            Flag.FORCE_ASYNCHRONOUS,
            extraFlag);
   }

   public static AdvancedCache failSilentWriteCache(AdvancedCache cache) {
      return cache.withFlags(
            Flag.FAIL_SILENTLY,
            Flag.ZERO_LOCK_ACQUISITION_TIMEOUT,
            Flag.SKIP_CACHE_LOAD,
            Flag.SKIP_REMOTE_LOOKUP);
   }

   public static AdvancedCache failSilentWriteCache(AdvancedCache cache,
         Flag extraFlag) {
      return cache.withFlags(
            Flag.FAIL_SILENTLY,
            Flag.ZERO_LOCK_ACQUISITION_TIMEOUT,
            Flag.SKIP_CACHE_LOAD,
            Flag.SKIP_REMOTE_LOOKUP,
            extraFlag);
   }

   public static AdvancedCache failSilentReadCache(AdvancedCache cache) {
      return cache.withFlags(
            Flag.FAIL_SILENTLY,
            Flag.ZERO_LOCK_ACQUISITION_TIMEOUT);
   }

   public static void broadcastEvictAll(AdvancedCache cache) {
      RpcManager rpcManager = cache.getRpcManager();
      if (rpcManager != null) {
         // Only broadcast evict all if it's clustered
         CacheCommandInitializer factory = cache.getComponentRegistry()
               .getComponent(CacheCommandInitializer.class);
         boolean isSync = isSynchronousCache(cache);

         EvictAllCommand cmd = factory.buildEvictAllCommand(cache.getName());
         rpcManager.broadcastRpcCommand(cmd, isSync);
      }
   }

   public static boolean isInvalidationCache(AdvancedCache cache) {
      return cache.getCacheConfiguration()
            .clustering().cacheMode().isInvalidation();
   }

   public static boolean isSynchronousCache(AdvancedCache cache) {
      return cache.getCacheConfiguration()
            .clustering().cacheMode().isSynchronous();
   }

   public static boolean isClustered(AdvancedCache cache) {
      return cache.getCacheConfiguration()
            .clustering().cacheMode().isClustered();
   }

}
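A short usage example of Caches.withinTx, mirroring how the updated NodeEnvironment test later in this diff clears a region's cache inside a JTA transaction:

import java.util.concurrent.Callable;

import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.hibernate.cache.infinispan.util.Caches;

class WithinTxExample {
   static void clearLocally(final AdvancedCache cache) throws Exception {
      // Runs the callable between tm.begin() and tm.commit(), marking the
      // transaction rollback-only if the callable throws.
      Caches.withinTx(cache.getTransactionManager(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            cache.withFlags(Flag.CACHE_MODE_LOCAL).clear();
            return null;
         }
      });
   }
}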
@@ -1,69 +0,0 @@
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2009, Red Hat, Inc. and/or its affiliates, and
 * individual contributors as indicated by the @author tags. See the
 * copyright.txt file in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.context.Flag;

import org.hibernate.cache.CacheException;

/**
 * FlagAdapter.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public enum FlagAdapter {
   ZERO_LOCK_ACQUISITION_TIMEOUT,
   CACHE_MODE_LOCAL,
   FORCE_ASYNCHRONOUS,
   FORCE_SYNCHRONOUS,
   SKIP_CACHE_STORE,
   SKIP_CACHE_LOAD;

   Flag toFlag() {
      switch(this) {
         case ZERO_LOCK_ACQUISITION_TIMEOUT:
            return Flag.ZERO_LOCK_ACQUISITION_TIMEOUT;
         case CACHE_MODE_LOCAL:
            return Flag.CACHE_MODE_LOCAL;
         case FORCE_ASYNCHRONOUS:
            return Flag.FORCE_ASYNCHRONOUS;
         case FORCE_SYNCHRONOUS:
            return Flag.FORCE_SYNCHRONOUS;
         case SKIP_CACHE_STORE:
            return Flag.SKIP_CACHE_STORE;
         case SKIP_CACHE_LOAD:
            return Flag.SKIP_CACHE_LOAD;
         default:
            throw new CacheException("Unmatched Infinispan flag " + this);
      }
   }

   static Flag[] toFlags(FlagAdapter[] adapters) {
      Flag[] flags = new Flag[adapters.length];
      for (int i = 0; i < adapters.length; i++) {
         flags[i] = adapters[i].toFlag();
      }
      return flags;
   }
}
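For callers, dropping FlagAdapter (and the CacheAdapter it served) is a mechanical substitution to Infinispan's own Flag enum, as the NodeEnvironment change later in this diff shows; a compilable micro-example of the new shape:

import org.infinispan.context.Flag;
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;

class FlagMigrationSketch {
   static void clearLocalOnly(EntityRegionImpl region) {
      // Previously: region.getCacheAdapter().withFlags(FlagAdapter.CACHE_MODE_LOCAL).clear();
      region.getCache().withFlags(Flag.CACHE_MODE_LOCAL).clear();
   }
}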
@@ -25,13 +25,13 @@ package org.hibernate.test.cache.infinispan;

import java.util.Set;

import org.infinispan.AdvancedCache;
import org.infinispan.transaction.tm.BatchModeTransactionManager;
import org.jboss.logging.Logger;
import org.junit.Test;

import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.spi.GeneralDataRegion;
import org.hibernate.cache.spi.QueryResultsRegion;
import org.hibernate.cache.spi.Region;

@@ -152,7 +152,7 @@ public abstract class AbstractGeneralDataRegionTestCase extends AbstractRegionIm
         cfg,
         getCacheTestSupport()
      );
      CacheAdapter localCache = getInfinispanCache( regionFactory );
      AdvancedCache localCache = getInfinispanCache( regionFactory );

      // Sleep a bit to avoid concurrent FLUSH problem
      avoidConcurrentFlush();

@@ -170,7 +170,7 @@ public abstract class AbstractGeneralDataRegionTestCase extends AbstractRegionIm
         cfg,
         getCacheTestSupport()
      );
      CacheAdapter remoteCache = getInfinispanCache( regionFactory );
      AdvancedCache remoteCache = getInfinispanCache( regionFactory );

      // Sleep a bit to avoid concurrent FLUSH problem
      avoidConcurrentFlush();
@@ -26,11 +26,11 @@ package org.hibernate.test.cache.infinispan;
import java.util.Properties;

import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.Region;
import org.hibernate.internal.util.compare.ComparableComparator;
import org.infinispan.AdvancedCache;

/**
 * Base class for tests of Region implementations.

@@ -40,7 +40,7 @@ import org.hibernate.internal.util.compare.ComparableComparator;
 */
public abstract class AbstractRegionImplTestCase extends AbstractNonFunctionalTestCase {

   protected abstract CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory);
   protected abstract AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory);

   protected abstract Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd);
@@ -24,6 +24,7 @@ package org.hibernate.test.cache.infinispan;
import java.util.Properties;
import javax.transaction.TransactionManager;

import org.infinispan.AdvancedCache;
import org.infinispan.config.Configuration;
import org.infinispan.config.Configuration.CacheMode;
import org.infinispan.eviction.EvictionStrategy;

@@ -38,7 +39,6 @@ import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
import org.hibernate.cache.infinispan.query.QueryResultsRegionImpl;
import org.hibernate.cache.infinispan.timestamp.TimestampsRegionImpl;
import org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cfg.Settings;
import org.hibernate.engine.transaction.jta.platform.internal.AbstractJtaPlatform;
import org.hibernate.engine.transaction.jta.platform.internal.JBossStandAloneJtaPlatform;

@@ -137,13 +137,13 @@ public class InfinispanRegionFactoryTestCase {
      assertFalse(factory.getDefinedConfigurations().contains(person));
      assertNotNull(factory.getTypeOverrides().get(addresses));
      assertFalse(factory.getDefinedConfigurations().contains(addresses));
      CacheAdapter cache = null;
      AdvancedCache cache;

      EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion(person, p, null);
      assertNotNull(factory.getTypeOverrides().get(person));
      assertTrue(factory.getDefinedConfigurations().contains(person));
      assertNull(factory.getTypeOverrides().get(address));
      cache = region.getCacheAdapter();
      cache = region.getCache();
      Configuration cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.LRU, cacheCfg.getEvictionStrategy());
      assertEquals(2000, cacheCfg.getEvictionWakeUpInterval());

@@ -156,7 +156,7 @@ public class InfinispanRegionFactoryTestCase {
      assertNotNull(factory.getTypeOverrides().get(person));
      assertTrue(factory.getDefinedConfigurations().contains(person));
      assertNull(factory.getTypeOverrides().get(address));
      cache = region.getCacheAdapter();
      cache = region.getCache();
      cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.FIFO, cacheCfg.getEvictionStrategy());
      assertEquals(3000, cacheCfg.getEvictionWakeUpInterval());

@@ -167,7 +167,7 @@ public class InfinispanRegionFactoryTestCase {
      assertNotNull(factory.getTypeOverrides().get(person));
      assertTrue(factory.getDefinedConfigurations().contains(person));
      assertNull(factory.getTypeOverrides().get(address));
      cache = region.getCacheAdapter();
      cache = region.getCache();
      cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.FIFO, cacheCfg.getEvictionStrategy());
      assertEquals(3000, cacheCfg.getEvictionWakeUpInterval());

@@ -178,7 +178,7 @@ public class InfinispanRegionFactoryTestCase {
      assertNotNull(factory.getTypeOverrides().get(addresses));
      assertTrue(factory.getDefinedConfigurations().contains(person));
      assertNull(factory.getTypeOverrides().get(parts));
      cache = collectionRegion .getCacheAdapter();
      cache = collectionRegion .getCache();
      cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.FIFO, cacheCfg.getEvictionStrategy());
      assertEquals(2500, cacheCfg.getEvictionWakeUpInterval());

@@ -191,7 +191,7 @@ public class InfinispanRegionFactoryTestCase {
      assertNotNull(factory.getTypeOverrides().get(addresses));
      assertTrue(factory.getDefinedConfigurations().contains(addresses));
      assertNull(factory.getTypeOverrides().get(parts));
      cache = collectionRegion.getCacheAdapter();
      cache = collectionRegion.getCache();
      cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.LRU, cacheCfg.getEvictionStrategy());
      assertEquals(3500, cacheCfg.getEvictionWakeUpInterval());

@@ -202,7 +202,7 @@ public class InfinispanRegionFactoryTestCase {
      assertNotNull(factory.getTypeOverrides().get(addresses));
      assertTrue(factory.getDefinedConfigurations().contains(addresses));
      assertNull(factory.getTypeOverrides().get(parts));
      cache = collectionRegion.getCacheAdapter();
      cache = collectionRegion.getCache();
      cacheCfg = cache.getConfiguration();
      assertEquals(EvictionStrategy.LRU, cacheCfg.getEvictionStrategy());
      assertEquals(3500, cacheCfg.getEvictionWakeUpInterval());

@@ -215,7 +215,7 @@ public class InfinispanRegionFactoryTestCase {

   @Test
   public void testBuildEntityCollectionRegionOverridesOnly() {
      CacheAdapter cache;
      AdvancedCache cache;
      Properties p = new Properties();
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "FIFO");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.wake_up_interval", "3000");

@@ -228,7 +228,7 @@ public class InfinispanRegionFactoryTestCase {
      try {
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, null);
         assertNull(factory.getTypeOverrides().get("com.acme.Address"));
         cache = region.getCacheAdapter();
         cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.FIFO, cacheCfg.getEvictionStrategy());
         assertEquals(3000, cacheCfg.getEvictionWakeUpInterval());

@@ -237,7 +237,7 @@ public class InfinispanRegionFactoryTestCase {

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl) factory.buildCollectionRegion("com.acme.Person.addresses", p, null);
         assertNull(factory.getTypeOverrides().get("com.acme.Person.addresses"));
         cache = collectionRegion.getCacheAdapter();
         cache = collectionRegion.getCache();
         cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.getEvictionStrategy());
         assertEquals(3500, cacheCfg.getEvictionWakeUpInterval());

@@ -267,7 +267,7 @@ public class InfinispanRegionFactoryTestCase {
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion(person, p, null);
         assertNotNull(factory.getTypeOverrides().get(person));
         assertTrue(factory.getDefinedConfigurations().contains(person));
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.getEvictionStrategy());
         assertEquals(3000, cacheCfg.getEvictionWakeUpInterval());

@@ -305,7 +305,7 @@ public class InfinispanRegionFactoryTestCase {
         config.setFetchInMemoryState(false);
         manager.defineConfiguration("timestamps", config);
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.NONE, cacheCfg.getEvictionStrategy());
         assertEquals(CacheMode.REPL_ASYNC, cacheCfg.getCacheMode());

@@ -331,7 +331,7 @@ public class InfinispanRegionFactoryTestCase {
         config.setCacheMode(CacheMode.REPL_SYNC);
         manager.defineConfiguration("unrecommended-timestamps", config);
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.NONE, cacheCfg.getEvictionStrategy());
         assertEquals(CacheMode.REPL_SYNC, cacheCfg.getCacheMode());

@@ -401,7 +401,7 @@ public class InfinispanRegionFactoryTestCase {
      try {
         assertTrue(factory.getDefinedConfigurations().contains("local-query"));
         QueryResultsRegionImpl region = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(query, p);
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(CacheMode.LOCAL, cacheCfg.getCacheMode());
         assertFalse(cacheCfg.isExposeJmxStatistics());

@@ -425,7 +425,7 @@ public class InfinispanRegionFactoryTestCase {
         QueryResultsRegionImpl region = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(queryRegionName, p);
         assertNotNull(factory.getTypeOverrides().get(queryRegionName));
         assertTrue(factory.getDefinedConfigurations().contains(queryRegionName));
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getConfiguration();
         assertEquals(EvictionStrategy.FIFO, cacheCfg.getEvictionStrategy());
         assertEquals(2222, cacheCfg.getEvictionWakeUpInterval());

@@ -449,18 +449,18 @@ public class InfinispanRegionFactoryTestCase {
      try {
         assertTrue(manager.getGlobalConfiguration().isExposeGlobalJmxStatistics());
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, null);
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         assertTrue(factory.getTypeOverrides().get("entity").isExposeStatistics());
         assertTrue(cache.getConfiguration().isExposeJmxStatistics());

         region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Person", p, null);
         cache = region.getCacheAdapter();
         cache = region.getCache();
         assertTrue(factory.getTypeOverrides().get("com.acme.Person").isExposeStatistics());
         assertTrue(cache.getConfiguration().isExposeJmxStatistics());

         final String query = "org.hibernate.cache.internal.StandardQueryCache";
         QueryResultsRegionImpl queryRegion = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(query, p);
         cache = queryRegion.getCacheAdapter();
         cache = queryRegion.getCache();
         assertTrue(factory.getTypeOverrides().get("query").isExposeStatistics());
         assertTrue(cache.getConfiguration().isExposeJmxStatistics());


@@ -469,12 +469,12 @@ public class InfinispanRegionFactoryTestCase {
         config.setFetchInMemoryState(false);
         manager.defineConfiguration("timestamps", config);
         TimestampsRegionImpl timestampsRegion = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         cache = timestampsRegion.getCacheAdapter();
         cache = timestampsRegion.getCache();
         assertTrue(factory.getTypeOverrides().get("timestamps").isExposeStatistics());
         assertTrue(cache.getConfiguration().isExposeJmxStatistics());

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl) factory.buildCollectionRegion("com.acme.Person.addresses", p, null);
         cache = collectionRegion.getCacheAdapter();
         cache = collectionRegion.getCache();
         assertTrue(factory.getTypeOverrides().get("collection").isExposeStatistics());
         assertTrue(cache.getConfiguration().isExposeJmxStatistics());
      } finally {

@@ -496,18 +496,18 @@ public class InfinispanRegionFactoryTestCase {
      try {
         assertFalse(manager.getGlobalConfiguration().isExposeGlobalJmxStatistics());
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, null);
         CacheAdapter cache = region.getCacheAdapter();
         AdvancedCache cache = region.getCache();
         assertFalse(factory.getTypeOverrides().get("entity").isExposeStatistics());
         assertFalse(cache.getConfiguration().isExposeJmxStatistics());

         region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Person", p, null);
         cache = region.getCacheAdapter();
         cache = region.getCache();
         assertFalse(factory.getTypeOverrides().get("com.acme.Person").isExposeStatistics());
         assertFalse(cache.getConfiguration().isExposeJmxStatistics());

         final String query = "org.hibernate.cache.internal.StandardQueryCache";
         QueryResultsRegionImpl queryRegion = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(query, p);
         cache = queryRegion.getCacheAdapter();
         cache = queryRegion.getCache();
         assertFalse(factory.getTypeOverrides().get("query").isExposeStatistics());
         assertFalse(cache.getConfiguration().isExposeJmxStatistics());


@@ -516,12 +516,12 @@ public class InfinispanRegionFactoryTestCase {
         config.setFetchInMemoryState(false);
         manager.defineConfiguration("timestamps", config);
         TimestampsRegionImpl timestampsRegion = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         cache = timestampsRegion.getCacheAdapter();
         cache = timestampsRegion.getCache();
         assertFalse(factory.getTypeOverrides().get("timestamps").isExposeStatistics());
         assertFalse(cache.getConfiguration().isExposeJmxStatistics());

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl) factory.buildCollectionRegion("com.acme.Person.addresses", p, null);
         cache = collectionRegion.getCacheAdapter();
         cache = collectionRegion.getCache();
         assertFalse(factory.getTypeOverrides().get("collection").isExposeStatistics());
         assertFalse(cache.getConfiguration().isExposeJmxStatistics());
      } finally {
@ -31,13 +31,12 @@ import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
|||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
|
||||
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
|
||||
import org.hibernate.cache.infinispan.util.FlagAdapter;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cfg.Configuration;
|
||||
import org.hibernate.boot.registry.internal.StandardServiceRegistryImpl;
|
||||
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
|
||||
|
||||
import static org.hibernate.cache.infinispan.util.CacheHelper.withinTx;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* Defines the environment for a node.
|
||||
|
@ -120,27 +119,27 @@ public class NodeEnvironment {
|
|||
public void release() throws Exception {
|
||||
if ( entityRegionMap != null ) {
|
||||
for ( final EntityRegionImpl region : entityRegionMap.values() ) {
|
||||
withinTx(region.getTransactionManager(), new Callable<Void>() {
|
||||
Caches.withinTx(region.getTransactionManager(), new Callable<Void>() {
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
region.getCacheAdapter().withFlags(FlagAdapter.CACHE_MODE_LOCAL).clear();
|
||||
region.getCache().withFlags(Flag.CACHE_MODE_LOCAL).clear();
|
||||
return null;
|
||||
}
|
||||
});
|
||||
region.getCacheAdapter().stop();
|
||||
region.getCache().stop();
|
||||
}
|
||||
entityRegionMap.clear();
|
||||
}
|
||||
if ( collectionRegionMap != null ) {
|
||||
for ( final CollectionRegionImpl collectionRegion : collectionRegionMap.values() ) {
|
||||
withinTx(collectionRegion.getTransactionManager(), new Callable<Void>() {
|
||||
Caches.withinTx(collectionRegion.getTransactionManager(), new Callable<Void>() {
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
collectionRegion.getCacheAdapter().withFlags( FlagAdapter.CACHE_MODE_LOCAL ).clear();
|
||||
collectionRegion.getCache().withFlags(Flag.CACHE_MODE_LOCAL).clear();
|
||||
return null;
|
||||
}
|
||||
});
|
||||
collectionRegion.getCacheAdapter().stop();
|
||||
collectionRegion.getCache().stop();
|
||||
}
|
||||
collectionRegionMap.clear();
|
||||
}
|
||||
|
|
|
@@ -32,12 +32,11 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import javax.transaction.Transaction;
import javax.transaction.TransactionManager;

import org.infinispan.manager.DefaultCacheManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

import org.hibernate.cache.infinispan.access.PutFromLoadValidator;

@@ -56,7 +55,6 @@ import static org.junit.Assert.fail;
 * @author Galder Zamarreño
 * @version $Revision: $
 */
@Ignore
public class PutFromLoadValidatorUnitTestCase {
   private Object KEY1 = "KEY1";

@@ -67,16 +65,17 @@ public class PutFromLoadValidatorUnitTestCase {
      tm = DualNodeJtaTransactionManagerImpl.getInstance("test");
   }

   @After
   public void tearDown() throws Exception {
      tm = null;
      try {
         DualNodeJtaTransactionManagerImpl.cleanupTransactions();
      }
      finally {
         DualNodeJtaTransactionManagerImpl.cleanupTransactionManagers();
      }
   @After
   public void tearDown() throws Exception {
      tm = null;
      try {
         DualNodeJtaTransactionManagerImpl.cleanupTransactions();
      }
      finally {
         DualNodeJtaTransactionManagerImpl.cleanupTransactionManagers();
      }
   }

   @Test
   public void testNakedPut() throws Exception {
      nakedPutTest(false);

@@ -87,7 +86,9 @@ PutFromLoadValidatorUnitTestCase {
   }

   private void nakedPutTest(boolean transactional) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
      PutFromLoadValidator testee = new PutFromLoadValidator(
            new DefaultCacheManager(), transactional ? tm : null,
            PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      if (transactional) {
         tm.begin();
      }

@@ -111,8 +112,8 @@ public class PutFromLoadValidatorUnitTestCase {
   }

   private void registeredPutTest(boolean transactional) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(
            transactional ? tm : null);
      PutFromLoadValidator testee = new PutFromLoadValidator(new DefaultCacheManager(),
            transactional ? tm : null, PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      if (transactional) {
         tm.begin();
      }

@@ -147,8 +148,8 @@ public class PutFromLoadValidatorUnitTestCase {

   private void nakedPutAfterRemovalTest(boolean transactional, boolean removeRegion)
         throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(
            transactional ? tm : null);
      PutFromLoadValidator testee = new PutFromLoadValidator(new DefaultCacheManager(),
            transactional ? tm : null, PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      if (removeRegion) {
         testee.invalidateRegion();
      } else {

@@ -187,8 +188,8 @@ public class PutFromLoadValidatorUnitTestCase {

   private void registeredPutAfterRemovalTest(boolean transactional, boolean removeRegion)
         throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(
            transactional ? tm : null);
      PutFromLoadValidator testee = new PutFromLoadValidator(new DefaultCacheManager(),
            transactional ? tm : null, PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      if (removeRegion) {
         testee.invalidateRegion();
      } else {

@@ -228,8 +229,8 @@ public class PutFromLoadValidatorUnitTestCase {

   private void registeredPutWithInterveningRemovalTest(boolean transactional, boolean removeRegion)
         throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(
            transactional ? tm : null);
      PutFromLoadValidator testee = new PutFromLoadValidator(new DefaultCacheManager(),
            transactional ? tm : null, PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      if (transactional) {
         tm.begin();
      }

@@ -269,7 +270,7 @@ public class PutFromLoadValidatorUnitTestCase {

   private void delayedNakedPutAfterRemovalTest(boolean transactional, boolean removeRegion)
         throws Exception {
      PutFromLoadValidator testee = new TestValidator(transactional ? tm : null, 100, 1000, 500, 10000);
      PutFromLoadValidator testee = new TestValidator(transactional ? tm : null, 100);
      if (removeRegion) {
         testee.invalidateRegion();
      } else {

@@ -300,7 +301,9 @@ public class PutFromLoadValidatorUnitTestCase {
   }

   private void multipleRegistrationtest(final boolean transactional) throws Exception {
      final PutFromLoadValidator testee = new PutFromLoadValidator(transactional ? tm : null);
      final PutFromLoadValidator testee = new PutFromLoadValidator(
            new DefaultCacheManager(), transactional ? tm : null,
            PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);

      final CountDownLatch registeredLatch = new CountDownLatch(3);
      final CountDownLatch finishedLatch = new CountDownLatch(3);

@@ -356,14 +359,14 @@ public class PutFromLoadValidatorUnitTestCase {
    */
   @Test
   public void testRemovalCleanup() throws Exception {
      TestValidator testee = new TestValidator(null, 200, 1000, 500, 10000);
      TestValidator testee = new TestValidator(null, 200);
      testee.invalidateKey("KEY1");
      testee.invalidateKey("KEY2");
      expectRemovalLenth(2, testee, 3000l);
      expectRemovalLenth(2, testee, 60000l);
      assertEquals(2, testee.getRemovalQueueLength());
      expectRemovalLenth(2, testee, 3000l);
      expectRemovalLenth(2, testee, 60000l);
      assertEquals(2, testee.getRemovalQueueLength());
      expectRemovalLenth( 2, testee, 3000l );
      expectRemovalLenth( 2, testee, 60000l );
   }

   private void expectRemovalLenth(int expectedLength, TestValidator testee, long timeout) throws InterruptedException {

@@ -383,132 +386,20 @@ public class PutFromLoadValidatorUnitTestCase {
      }
   }

   /**
    * Very much a white box test of the logic for ensuring pending put registrations get cleaned up.
    *
    * @throws Exception
    */
   @Test
   public void testPendingPutCleanup() throws Exception {
      TestValidator testee = new TestValidator(tm, 5000, 600, 300, 900);

      // Start with a regionRemoval so we can confirm at the end that all
      // registrations have been cleaned out
      testee.invalidateRegion();

      testee.registerPendingPut("1");
      testee.registerPendingPut("2");
      testee.registerPendingPut("3");
      testee.registerPendingPut("4");
      testee.registerPendingPut("5");
      testee.registerPendingPut("6");
      testee.acquirePutFromLoadLock("6");
      testee.releasePutFromLoadLock("6");
      testee.acquirePutFromLoadLock("2");
      testee.releasePutFromLoadLock("2");
      // ppq = [1,2(c),3,4,5,6(c)]
      assertEquals(6, testee.getPendingPutQueueLength());
      assertEquals(0, testee.getOveragePendingPutQueueLength());

      // Sleep past "pendingPutRecentPeriod"
      Thread.sleep(310);
      testee.registerPendingPut("7");
      // White box -- should have cleaned out 2 (completed) but
      // not gotten to 6 (also removed)
      // ppq = [1,3,4,5,6(c),7]
      assertEquals(0, testee.getOveragePendingPutQueueLength());
      assertEquals(6, testee.getPendingPutQueueLength());

      // Sleep past "pendingPutOveragePeriod"
      Thread.sleep(310);
      testee.registerPendingPut("8");
      // White box -- should have cleaned out 6 (completed) and
      // moved 1, 3, 4 and 5 to overage queue
      // oppq = [1,3,4,5] ppq = [7,8]
      assertEquals(4, testee.getOveragePendingPutQueueLength());
      assertEquals(2, testee.getPendingPutQueueLength());

      // Sleep past "maxPendingPutDelay"
      Thread.sleep(310);
      testee.acquirePutFromLoadLock("3");
      testee.releasePutFromLoadLock("3");
      // White box -- should have cleaned out 1 (overage) and
      // moved 7 to overage queue
      // oppq = [3(c),4,5,7] ppq=[8]
      assertEquals(4, testee.getOveragePendingPutQueueLength());
      assertEquals(1, testee.getPendingPutQueueLength());

      // Sleep past "maxPendingPutDelay"
      Thread.sleep(310);
      tm.begin();
      testee.registerPendingPut("7");
      Transaction tx = tm.suspend();

      // White box -- should have cleaned out 3 (completed)
      // and 4 (overage) and moved 8 to overage queue
      // We now have 5,7,8 in overage and 7tx in pending
      // oppq = [5,7,8] ppq=[7tx]
      assertEquals(3, testee.getOveragePendingPutQueueLength());
      assertEquals(1, testee.getPendingPutQueueLength());

      // Validate that only expected items can do puts, thus indirectly
      // proving the others have been cleaned out of pendingPuts map
      boolean locked = testee.acquirePutFromLoadLock("1");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      // 5 was overage, so should have been cleaned
      assertEquals(2, testee.getOveragePendingPutQueueLength());
      locked = testee.acquirePutFromLoadLock("2");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      // 7 was overage, so should have been cleaned
      assertEquals(1, testee.getOveragePendingPutQueueLength());
      locked = testee.acquirePutFromLoadLock("3");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      locked = testee.acquirePutFromLoadLock("4");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      locked = testee.acquirePutFromLoadLock("5");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      locked = testee.acquirePutFromLoadLock("1");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(testee.acquirePutFromLoadLock("6"));
      locked = testee.acquirePutFromLoadLock("7");
      if (locked) {
         testee.releasePutFromLoadLock("1");
      }
      assertFalse(locked);
      assertTrue(testee.acquirePutFromLoadLock("8"));
      testee.releasePutFromLoadLock("8");
      tm.resume(tx);
      assertTrue(testee.acquirePutFromLoadLock("7"));
      testee.releasePutFromLoadLock("7");
   }
   @Test
   public void testInvalidateKeyBlocksForInProgressPut() throws Exception {
      invalidationBlocksForInProgressPutTest(true);
   }

   @Test
   public void testInvalidateRegionBlocksForInProgressPut() throws Exception {
      invalidationBlocksForInProgressPutTest(false);
   }

   private void invalidationBlocksForInProgressPutTest(final boolean keyOnly) throws Exception {
      final PutFromLoadValidator testee = new PutFromLoadValidator(null);
      final PutFromLoadValidator testee = new PutFromLoadValidator(
            new DefaultCacheManager(), null,
            PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      final CountDownLatch removeLatch = new CountDownLatch(1);
      final CountDownLatch pferLatch = new CountDownLatch(1);
      final AtomicReference<Object> cache = new AtomicReference<Object>("INITIAL");

@@ -566,22 +457,9 @@ public class PutFromLoadValidatorUnitTestCase {
   private static class TestValidator extends PutFromLoadValidator {

      protected TestValidator(TransactionManager transactionManager,
            long nakedPutInvalidationPeriod, long pendingPutOveragePeriod,
            long pendingPutRecentPeriod, long maxPendingPutDelay) {
         super(transactionManager, nakedPutInvalidationPeriod, pendingPutOveragePeriod,
               pendingPutRecentPeriod, maxPendingPutDelay);
      }

      @Override
      public int getOveragePendingPutQueueLength() {
         // TODO Auto-generated method stub
         return super.getOveragePendingPutQueueLength();
      }

      @Override
      public int getPendingPutQueueLength() {
         // TODO Auto-generated method stub
         return super.getPendingPutQueueLength();
            long nakedPutInvalidationPeriod) {
         super(new DefaultCacheManager(),
               transactionManager, nakedPutInvalidationPeriod);
      }

      @Override
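The recurring change in this unit test is the validator's constructor: pending puts now live in a cache obtained from an Infinispan cache manager rather than in the old in-memory queues, so the overage/recent/max-delay tuning parameters disappear. A hedged sketch of how the tests build a validator, with arguments exactly as they appear in the hunks above (the helper method name is illustrative, and the standalone DefaultCacheManager is only suitable for tests):

   // Sketch based on the test code above; constructor arguments are:
   // (cache manager backing the pending-puts cache, transaction manager or null,
   //  naked-put invalidation period).
   import javax.transaction.TransactionManager;
   import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
   import org.infinispan.manager.DefaultCacheManager;

   class ValidatorSketch {
      static PutFromLoadValidator newTestValidator(TransactionManager tm) {
         return new PutFromLoadValidator(
               new DefaultCacheManager(), tm,
               PutFromLoadValidator.NAKED_PUT_INVALIDATION_PERIOD);
      }
   }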
@@ -32,6 +32,8 @@ import java.util.concurrent.TimeUnit;
import javax.transaction.TransactionManager;

import junit.framework.AssertionFailedError;
import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.transaction.tm.BatchModeTransactionManager;
import org.jboss.logging.Logger;
import org.junit.After;

@@ -42,7 +44,6 @@ import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.access.TransactionalAccessDelegate;
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
import org.hibernate.cache.infinispan.util.CacheHelper;
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.access.AccessType;

@@ -103,8 +104,8 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      localCollectionRegion = localEnvironment.getCollectionRegion( REGION_NAME, getCacheDataDescription() );
      localAccessStrategy = localCollectionRegion.buildAccessStrategy( getAccessType() );

      invalidation = localCollectionRegion.getCacheAdapter().isClusteredInvalidation();
      synchronous = localCollectionRegion.getCacheAdapter().isSynchronous();
      invalidation = Caches.isInvalidationCache(localCollectionRegion.getCache());
      synchronous = Caches.isSynchronousCache(localCollectionRegion.getCache());

      // Sleep a bit to avoid concurrent FLUSH problem
      avoidConcurrentFlush();

@@ -161,7 +162,8 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      final CountDownLatch pferLatch = new CountDownLatch( 1 );
      final CountDownLatch removeLatch = new CountDownLatch( 1 );
      final TransactionManager remoteTm = remoteCollectionRegion.getTransactionManager();
      PutFromLoadValidator validator = new PutFromLoadValidator(remoteTm) {
      PutFromLoadValidator validator = new PutFromLoadValidator(
            new DefaultCacheManager(), remoteTm, 20000) {
         @Override
         public boolean acquirePutFromLoadLock(Object key) {
            boolean acquired = super.acquirePutFromLoadLock( key );

@@ -195,7 +197,7 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      Callable<Void> removeCallable = new Callable<Void>() {
         public Void call() throws Exception {
            removeLatch.await();
            CacheHelper.withinTx(localTm, new Callable<Void>() {
            Caches.withinTx(localTm, new Callable<Void>() {
               @Override
               public Void call() throws Exception {
                  delegate.remove("k1");

@@ -214,7 +216,7 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      pferFuture.get();
      removeFuture.get();

      assertFalse( localCollectionRegion.getCacheAdapter().containsKey( "k1" ) );
      assertFalse(localCollectionRegion.getCache().containsKey("k1"));
   }

   @Test

@@ -394,7 +396,7 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      // Wait for async propagation
      sleep( 250 );

      CacheHelper.withinTx(localCollectionRegion.getTransactionManager(), new Callable<Void>() {
      Caches.withinTx(localCollectionRegion.getTransactionManager(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            if (evict)

@@ -414,9 +416,9 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs

      final String KEY = KEY_BASE + testCount++;

      assertEquals( 0, getValidKeyCount( localCollectionRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localCollectionRegion.getCache().keySet() ) );

      assertEquals( 0, getValidKeyCount( remoteCollectionRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteCollectionRegion.getCache().keySet() ) );

      assertNull( "local is clean", localAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertNull( "remote is clean", remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

@@ -429,7 +431,7 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      // Wait for async propagation
      sleep( 250 );

      CacheHelper.withinTx(localCollectionRegion.getTransactionManager(), new Callable<Void>() {
      Caches.withinTx(localCollectionRegion.getTransactionManager(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            if (evict)

@@ -443,19 +445,19 @@ public abstract class AbstractCollectionRegionAccessStrategyTestCase extends Abs
      // This should re-establish the region root node
      assertNull( localAccessStrategy.get( KEY, System.currentTimeMillis() ) );

      assertEquals( 0, getValidKeyCount( localCollectionRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localCollectionRegion.getCache().keySet() ) );

      // Re-establishing the region root on the local node doesn't
      // propagate it to other nodes. Do a get on the remote node to re-establish
      assertEquals( null, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

      assertEquals( 0, getValidKeyCount( remoteCollectionRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteCollectionRegion.getCache().keySet() ) );

      // Test whether the get above messes up the optimistic version
      remoteAccessStrategy.putFromLoad( KEY, VALUE1, System.currentTimeMillis(), new Integer( 1 ) );
      assertEquals( VALUE1, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

      assertEquals( 1, getValidKeyCount( remoteCollectionRegion.getCacheAdapter().keySet() ) );
      assertEquals( 1, getValidKeyCount( remoteCollectionRegion.getCache().keySet() ) );

      // Wait for async propagation of the putFromLoad
      sleep( 250 );
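The overridden acquirePutFromLoadLock above is how this test interleaves a putFromLoad with a concurrent remove. The hunk only shows the call to super; one plausible wiring of the two latches declared just before it (purely illustrative, not taken from the commit) would be:

   // Illustrative only: the latch choreography below is an assumption, not the commit's code.
   // removeLatch, pferLatch and remoteTm are the fields declared in the test above.
   PutFromLoadValidator validator = new PutFromLoadValidator(
         new DefaultCacheManager(), remoteTm, 20000) {
      @Override
      public boolean acquirePutFromLoadLock(Object key) {
         boolean acquired = super.acquirePutFromLoadLock(key);
         try {
            removeLatch.countDown();              // let the competing remove proceed
            pferLatch.await(2, TimeUnit.SECONDS); // keep the putFromLoad in flight meanwhile
         } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
         }
         return acquired;
      }
   };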
@@ -27,8 +27,6 @@ import java.util.Properties;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.CollectionRegion;
import org.hibernate.cache.spi.Region;

@@ -36,6 +34,7 @@ import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTestCase;
import org.infinispan.AdvancedCache;

import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;

@@ -70,8 +69,8 @@ public class CollectionRegionImplTestCase extends AbstractEntityCollectionRegion
   }

   @Override
   protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache());
   protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache();
   }

   @Override
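With the CacheAdapter wrapper removed, these tests reach the underlying Infinispan cache straight from the region factory. A minimal sketch of the lookup used in the getInfinispanCache() override above (the class and method name below are illustrative; the default entity cache name comes from InfinispanRegionFactory):

   // Sketch; mirrors the getInfinispanCache() override shown above.
   import org.hibernate.cache.infinispan.InfinispanRegionFactory;
   import org.infinispan.AdvancedCache;

   class CacheLookupSketch {
      static AdvancedCache entityCache(InfinispanRegionFactory regionFactory) {
         return regionFactory.getCacheManager()
               .getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE)
               .getAdvancedCache();
      }
   }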
@@ -29,6 +29,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import junit.framework.AssertionFailedError;
import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.Cache;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.tm.BatchModeTransactionManager;

@@ -39,7 +40,6 @@ import org.junit.Test;

import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
import org.hibernate.cache.infinispan.util.CacheHelper;
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.access.AccessType;

@@ -97,8 +97,8 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
      localEntityRegion = localEnvironment.getEntityRegion( REGION_NAME, getCacheDataDescription() );
      localAccessStrategy = localEntityRegion.buildAccessStrategy( getAccessType() );

      invalidation = localEntityRegion.getCacheAdapter().isClusteredInvalidation();
      synchronous = localEntityRegion.getCacheAdapter().isSynchronous();
      invalidation = Caches.isInvalidationCache(localEntityRegion.getCache());
      synchronous = Caches.isSynchronousCache(localEntityRegion.getCache());

      // Sleep a bit to avoid concurrent FLUSH problem
      avoidConcurrentFlush();

@@ -109,8 +109,8 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
      remoteEntityRegion = remoteEnvironment.getEntityRegion( REGION_NAME, getCacheDataDescription() );
      remoteAccessStrategy = remoteEntityRegion.buildAccessStrategy( getAccessType() );

      waitForClusterToForm(localEntityRegion.getCacheAdapter().getCache(),
            remoteEntityRegion.getCacheAdapter().getCache());
      waitForClusterToForm(localEntityRegion.getCache(),
            remoteEntityRegion.getCache());
   }

   protected void waitForClusterToForm(Cache... caches) {

@@ -534,8 +534,8 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac

   private void evictOrRemoveTest(final boolean evict) throws Exception {
      final String KEY = KEY_BASE + testCount++;
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCache().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCache().keySet() ) );

      assertNull( "local is clean", localAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertNull( "remote is clean", remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

@@ -545,7 +545,7 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
      remoteAccessStrategy.putFromLoad( KEY, VALUE1, System.currentTimeMillis(), new Integer( 1 ) );
      assertEquals( VALUE1, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

      CacheHelper.withinTx(localEntityRegion.getTransactionManager(), new Callable<Void>() {
      Caches.withinTx(localEntityRegion.getTransactionManager(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            if ( evict )

@@ -556,15 +556,15 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
         }
      });
      assertEquals(null, localAccessStrategy.get(KEY, System.currentTimeMillis()));
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCache().keySet() ) );
      assertEquals( null, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCache().keySet() ) );
   }

   private void evictOrRemoveAllTest(final boolean evict) throws Exception {
      final String KEY = KEY_BASE + testCount++;
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCache().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCache().keySet() ) );
      assertNull( "local is clean", localAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertNull( "remote is clean", remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );

@@ -580,7 +580,7 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac
      // Wait for async propagation
      sleep( 250 );

      CacheHelper.withinTx(localEntityRegion.getTransactionManager(), new Callable<Void>() {
      Caches.withinTx(localEntityRegion.getTransactionManager(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            if (evict) {

@@ -595,17 +595,17 @@ public abstract class AbstractEntityRegionAccessStrategyTestCase extends Abstrac

      // This should re-establish the region root node in the optimistic case
      assertNull(localAccessStrategy.get(KEY, System.currentTimeMillis()));
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( localEntityRegion.getCache().keySet() ) );

      // Re-establishing the region root on the local node doesn't
      // propagate it to other nodes. Do a get on the remote node to re-establish
      assertEquals( null, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 0, getValidKeyCount( remoteEntityRegion.getCache().keySet() ) );

      // Test whether the get above messes up the optimistic version
      remoteAccessStrategy.putFromLoad( KEY, VALUE1, System.currentTimeMillis(), new Integer( 1 ) );
      assertEquals( VALUE1, remoteAccessStrategy.get( KEY, System.currentTimeMillis() ) );
      assertEquals( 1, getValidKeyCount( remoteEntityRegion.getCacheAdapter().keySet() ) );
      assertEquals( 1, getValidKeyCount( remoteEntityRegion.getCache().keySet() ) );

      // Wait for async propagation
      sleep( 250 );
@@ -27,14 +27,13 @@ import java.util.Properties;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.EntityRegion;
import org.hibernate.cache.spi.Region;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTestCase;
import org.infinispan.AdvancedCache;

import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;

@@ -81,8 +80,9 @@ public class EntityRegionImplTestCase extends AbstractEntityCollectionRegionTest
   }

   @Override
   protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache());
   protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return regionFactory.getCacheManager().getCache(
            InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache();
   }

}
@@ -1,10 +1,10 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * Copyright (c) 2007, Red Hat, Inc. and/or it's affiliates or third-party contributors as
 * Copyright (c) 2012, Red Hat, Inc or third-party contributors as
 * indicated by the @author tags or express copyright attribution
 * statements applied by the authors. All third-party contributions are
 * distributed under license by Red Hat, Inc. and/or it's affiliates.
 * distributed under license by Red Hat Middleware LLC.
 *
 * This copyrighted material is made available to anyone wishing to use, modify,
 * copy, or redistribute it subject to the terms and conditions of the GNU

@@ -21,37 +21,41 @@
 * 51 Franklin Street, Fifth Floor
 * Boston, MA 02110-1301 USA
 */
package org.hibernate.cache.infinispan.util;

import java.util.concurrent.Callable;
import javax.transaction.Status;
import javax.transaction.TransactionManager;
package org.hibernate.test.cache.infinispan.functional;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;

/**
 * Helper for dealing with Infinisan cache instances.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class CacheHelper {
@NamedQueries({@NamedQuery(name=Age.QUERY, query = "SELECT a FROM Age a")})
@Entity
public class Age {

   /**
    * Disallow external instantiation of CacheHelper.
    */
   private CacheHelper() {
   public static final String QUERY = "Age.findAll";

   @Id
   @GeneratedValue
   private Integer id;
   private Integer age;

   public Integer getId() {
      return id;
   }

   public void setId(Integer id) {
      this.id = id;
   }

   public Integer getAge() {
      return age;
   }

   public static <T> T withinTx(TransactionManager tm, Callable<T> c) throws Exception {
      tm.begin();
      try {
         return c.call();
      } catch (Exception e) {
         tm.setRollbackOnly();
         throw e;
      } finally {
         if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
         else tm.rollback();
      }
   public void setAge(Integer age) {
      this.age = age;
   }

}
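The transaction-wrapping helper removed from CacheHelper above now lives in Caches, judging by the Caches.withinTx calls elsewhere in this commit. Its logic, visible in the deleted lines, boils down to: begin, run the callable, mark rollback-only on failure, then commit or roll back depending on the transaction status. A self-contained sketch of that pattern (helper class name is illustrative):

   // Sketch of the withinTx pattern shown in the removed CacheHelper code above.
   import java.util.concurrent.Callable;
   import javax.transaction.Status;
   import javax.transaction.TransactionManager;

   final class TxHelperSketch {
      private TxHelperSketch() {}

      static <T> T withinTx(TransactionManager tm, Callable<T> c) throws Exception {
         tm.begin();
         try {
            return c.call();
         } catch (Exception e) {
            // mark for rollback and propagate; the finally block decides the outcome
            tm.setRollbackOnly();
            throw e;
         } finally {
            if (tm.getStatus() == Status.STATUS_ACTIVE)
               tm.commit();
            else
               tm.rollback();
         }
      }
   }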
@@ -29,6 +29,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import junit.framework.AssertionFailedError;
import org.hibernate.cache.infinispan.util.Caches;
import org.infinispan.AdvancedCache;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited;
import org.infinispan.notifications.cachelistener.event.CacheEntryVisitedEvent;

@@ -38,9 +40,6 @@ import org.jboss.logging.Logger;

import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.cache.infinispan.util.CacheHelper;
import org.hibernate.cache.internal.StandardQueryCache;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.GeneralDataRegion;

@@ -80,7 +79,7 @@ public class QueryRegionImplTestCase extends AbstractGeneralDataRegionTestCase {

   @Override
   protected void regionPut(final GeneralDataRegion region) throws Exception {
      CacheHelper.withinTx(BatchModeTransactionManager.getInstance(), new Callable<Void>() {
      Caches.withinTx(BatchModeTransactionManager.getInstance(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            region.put(KEY, VALUE1);

@@ -91,7 +90,7 @@ public class QueryRegionImplTestCase extends AbstractGeneralDataRegionTestCase {

   @Override
   protected void regionEvict(final GeneralDataRegion region) throws Exception {
      CacheHelper.withinTx(BatchModeTransactionManager.getInstance(), new Callable<Void>() {
      Caches.withinTx(BatchModeTransactionManager.getInstance(), new Callable<Void>() {
         @Override
         public Void call() throws Exception {
            region.evict(KEY);

@@ -101,8 +100,8 @@ public class QueryRegionImplTestCase extends AbstractGeneralDataRegionTestCase {
   }

   @Override
   protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache( "local-query" ).getAdvancedCache());
   protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return regionFactory.getCacheManager().getCache( "local-query" ).getAdvancedCache();
   }

   @Override

@@ -230,7 +229,7 @@ public class QueryRegionImplTestCase extends AbstractGeneralDataRegionTestCase {
      assertEquals( VALUE1, region.get( KEY ) );

      // final Fqn rootFqn = getRegionFqn(getStandardRegionName(REGION_PREFIX), REGION_PREFIX);
      final CacheAdapter jbc = getInfinispanCache( regionFactory );
      final AdvancedCache jbc = getInfinispanCache(regionFactory);

      final CountDownLatch blockerLatch = new CountDownLatch( 1 );
      final CountDownLatch writerLatch = new CountDownLatch( 1 );
@@ -0,0 +1,343 @@
package org.hibernate.test.cache.infinispan.stress;

import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.boot.registry.internal.StandardServiceRegistryImpl;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.mapping.Collection;
import org.hibernate.mapping.PersistentClass;
import org.hibernate.test.cache.infinispan.functional.Age;
import org.hibernate.testing.ServiceRegistryBuilder;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;

import javax.transaction.TransactionManager;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.infinispan.test.TestingUtil.withTx;
import static org.junit.Assert.*;

/**
 * A stress test for putFromLoad operations
 *
 * @author Galder Zamarreño
 * @since 4.1
 */
@Ignore
public class PutFromLoadStressTestCase {

   static final Log log = LogFactory.getLog(PutFromLoadStressTestCase.class);
   static final boolean isTrace = log.isTraceEnabled();
   static final int NUM_THREADS = 100;
   static final int WARMUP_TIME_SECS = 10;
   static final long RUNNING_TIME_SECS = Integer.getInteger("time", 60);
   static final long LAUNCH_INTERVAL_MILLIS = 10;

   static final int NUM_INSTANCES = 5000;

   static SessionFactory sessionFactory;
   static TransactionManager tm;

   final AtomicBoolean run = new AtomicBoolean(true);

   @BeforeClass
   public static void beforeClass() {
      Configuration cfg = new Configuration();
      cfg.setProperty(Environment.USE_SECOND_LEVEL_CACHE, "true");
      cfg.setProperty(Environment.USE_QUERY_CACHE, "true");
      // TODO: Tweak to have a fully local region factory (no transport, cache mode = local, no marshalling, ...etc)
      cfg.setProperty(Environment.CACHE_REGION_FACTORY,
            "org.hibernate.cache.infinispan.InfinispanRegionFactory");
      cfg.setProperty(Environment.JTA_PLATFORM,
            "org.hibernate.service.jta.platform.internal.JBossStandAloneJtaPlatform");

      // Force minimal puts off to simplify stressing putFromLoad logic
      cfg.setProperty(Environment.USE_MINIMAL_PUTS, "false");

      // Mappings
      configureMappings(cfg);

//      // Database settings
//      cfg.setProperty(Environment.DRIVER, "org.postgresql.Driver");
//      cfg.setProperty(Environment.URL, "jdbc:postgresql://localhost/hibernate");
//      cfg.setProperty(Environment.DIALECT, "org.hibernate.dialect.PostgreSQL82Dialect");
//      cfg.setProperty(Environment.USER, "hbadmin");
//      cfg.setProperty(Environment.PASS, "hbadmin");

      // Create database schema in each run
      cfg.setProperty(Environment.HBM2DDL_AUTO, "create-drop");

      StandardServiceRegistryImpl registry =
            ServiceRegistryBuilder.buildServiceRegistry(cfg.getProperties());
      sessionFactory = cfg.buildSessionFactory(registry);

      tm = com.arjuna.ats.jta.TransactionManager.transactionManager();
   }

   private static void configureMappings(Configuration cfg) {
      String[] mappings = {
            "cache/infinispan/functional/Item.hbm.xml",
            "cache/infinispan/functional/Customer.hbm.xml",
            "cache/infinispan/functional/Contact.hbm.xml"};
      for (String mapping : mappings)
         cfg.addResource("org/hibernate/test/" + mapping);

      Class<?>[] annotatedClasses = getAnnotatedClasses();
      if ( annotatedClasses != null ) {
         for ( Class<?> annotatedClass : annotatedClasses ) {
            cfg.addAnnotatedClass( annotatedClass );
         }
      }

      cfg.buildMappings();
      Iterator it = cfg.getClassMappings();
      String cacheStrategy = "transactional";
      while (it.hasNext()) {
         PersistentClass clazz = (PersistentClass) it.next();
         if (!clazz.isInherited()) {
            cfg.setCacheConcurrencyStrategy(clazz.getEntityName(), cacheStrategy);
         }
      }
      it = cfg.getCollectionMappings();
      while (it.hasNext()) {
         Collection coll = (Collection) it.next();
         cfg.setCollectionCacheConcurrencyStrategy( coll.getRole(), cacheStrategy);
      }
   }

   @AfterClass
   public static void afterClass() {
      sessionFactory.close();
   }

   public static Class<Object>[] getAnnotatedClasses() {
      return new Class[] {Age.class};
   }

   @Test
   public void testQueryPerformance() throws Exception {
      store();
//      doTest(true);
//      run.set(true); // Reset run
      doTest(false);
   }

   private void store() throws Exception {
      for (int i = 0; i < NUM_INSTANCES; i++) {
         final Age age = new Age();
         age.setAge(i);
         withTx(tm, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
               Session s = sessionFactory.openSession();
               s.getTransaction().begin();
               s.persist(age);
               s.getTransaction().commit();
               s.close();
               return null;
            }
         });
      }
   }

   private void doTest(boolean warmup) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(NUM_THREADS);
      try {
         CyclicBarrier barrier = new CyclicBarrier(NUM_THREADS + 1);
         List<Future<String>> futures = new ArrayList<Future<String>>(NUM_THREADS);
         for (int i = 0; i < NUM_THREADS; i++) {
            Future<String> future = executor.submit(
                  new SelectQueryRunner(barrier, warmup, i + 1));
            futures.add(future);
            Thread.sleep(LAUNCH_INTERVAL_MILLIS);
         }
         barrier.await(); // wait for all threads to be ready

         long timeout = warmup ? WARMUP_TIME_SECS : RUNNING_TIME_SECS;
         TimeUnit unit = TimeUnit.SECONDS;

         Thread.sleep(unit.toMillis(timeout)); // Wait for the duration of the test
         run.set(false); // Instruct tests to stop doing work
         barrier.await(2, TimeUnit.MINUTES); // wait for all threads to finish

         log.infof("[%s] All threads finished, check for exceptions", title(warmup));
         for (Future<String> future : futures) {
            String opsPerMS = future.get();
            if (!warmup)
               log.infof("[%s] Operations/ms: %s", title(warmup), opsPerMS);
         }
         log.infof("[%s] All future gets checked", title(warmup));
      } catch (Exception e) {
         log.errorf(e, "Error in one of the execution threads during %s", title(warmup));
         throw e;
      } finally {
         executor.shutdownNow();
      }
   }

   private String title(boolean warmup) {
      return warmup ? "warmup" : "stress";
   }

   public class SelectQueryRunner implements Callable<String> {

      final CyclicBarrier barrier;
      final boolean warmup;
      final Integer customerId;

      public SelectQueryRunner(CyclicBarrier barrier, boolean warmup, Integer customerId) {
         this.barrier = barrier;
         this.warmup = warmup;
         this.customerId = customerId;
      }

      @Override
      public String call() throws Exception {
         try {
            if (isTrace)
               log.tracef("[%s] Wait for all executions paths to be ready to perform calls", title(warmup));
            barrier.await();

            long start = System.nanoTime();
            int runs = 0;
            if (isTrace)
               log.tracef("[%s] Start time: %d", title(warmup), start);

//            while (USE_TIME && PutFromLoadStressTestCase.this.run.get()) {
//               if (runs % 100000 == 0)
//                  log.infof("[%s] Query run # %d", title(warmup), runs);
//
////               Customer customer = query();
////               deleteCached(customer);

            queryItems();
//               deleteCachedItems();
//
//               runs++;
//            }
            long end = System.nanoTime();
            long duration = end - start;
            if (isTrace)
               log.tracef("[%s] End time: %d, duration: %d, runs: %d",
                     title(warmup), start, duration, runs);

            return opsPerMS(duration, runs);
         } finally {
            if (isTrace)
               log.tracef("[%s] Wait for all execution paths to finish", title(warmup));

            barrier.await();
         }
      }

      private void deleteCachedItems() throws Exception {
         withTx(tm, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
               sessionFactory.getCache().evictEntityRegion(Age.class);
               return null;
            }
         });
      }

      private void queryItems() throws Exception {
         withTx(tm, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
               Session s = sessionFactory.getCurrentSession();
               Query query = s.getNamedQuery(Age.QUERY).setCacheable(true);
               // Query query = s.createQuery("from Age").setCacheable(true);
               List<Age> result = (List<Age>) query.list();
               assertFalse(result.isEmpty());
               return null;
            }
         });
      }


//      private void deleteCachedItems() throws Exception {
//         withTx(tm, new Callable<Void>() {
//            @Override
//            public Void call() throws Exception {
//               sessionFactory.getCache().evictEntityRegion(Item.class);
//               return null;
//            }
//         });
//      }
//
//      private void queryItems() throws Exception {
//         withTx(tm, new Callable<Void>() {
//            @Override
//            public Void call() throws Exception {
//               Session s = sessionFactory.getCurrentSession();
//               Query query = s.createQuery("from Item").setCacheable(true);
//               List<Item> result = (List<Item>) query.list();
//               assertFalse(result.isEmpty());
//               return null;
//            }
//         });
//      }

//      private Customer query() throws Exception {
//         return withTx(tm, new Callable<Customer>() {
//            @Override
//            public Customer call() throws Exception {
//               Session s = sessionFactory.getCurrentSession();
//               Customer customer = (Customer) s.load(Customer.class, customerId);
//               assertNotNull(customer);
//               Set<Contact> contacts = customer.getContacts();
//               Contact contact = contacts.iterator().next();
//               assertNotNull(contact);
//               assertEquals("private contact", contact.getName());
//
////               Contact found = contacts.isEmpty() ? null : contacts.iterator().next();
////               Set<Contact> contacts = found.getContacts();
////               assertTrue(contacts + " not empty", contacts.isEmpty());
////
////               if (found != null && found.hashCode() == System.nanoTime()) {
////                  System.out.print(" ");
////               } else if (found == null) {
////                  throw new IllegalStateException("Contact cannot be null");
////               }
//               return customer;
//            }
//         });
//      }

//      private void deleteCached(final Customer customer) throws Exception {
//         withTx(tm, new Callable<Void>() {
//            @Override
//            public Void call() throws Exception {
//               sessionFactory.getCache().evictEntity(Customer.class, customer.getId());
//               return null; // TODO: Customise this generated block
//            }
//         });
//      }

      private String opsPerMS(long nanos, int ops) {
         long totalMillis = TimeUnit.NANOSECONDS.toMillis(nanos);
         if (totalMillis > 0)
            return ops / totalMillis + " ops/ms";
         else
            return "NAN ops/ms";
      }

   }

}
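The doTest() method above coordinates its workers with a CyclicBarrier sized NUM_THREADS + 1 and an AtomicBoolean: the extra barrier party is the driver thread, which releases the workers, sleeps for the test duration, flips the flag, then waits on the barrier again for the workers to drain. Stripped of the Hibernate specifics, that timed-run skeleton looks roughly like this (a generic sketch, not code from the commit):

   // Generic sketch of the timed-run coordination used by the stress tests above.
   import java.util.concurrent.CyclicBarrier;
   import java.util.concurrent.ExecutorService;
   import java.util.concurrent.Executors;
   import java.util.concurrent.TimeUnit;
   import java.util.concurrent.atomic.AtomicBoolean;

   class TimedRunSketch {
      static void run(final Runnable work, int numThreads, long seconds) throws Exception {
         final AtomicBoolean run = new AtomicBoolean(true);
         final CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
         ExecutorService exec = Executors.newFixedThreadPool(numThreads);
         for (int i = 0; i < numThreads; i++) {
            exec.submit(new Runnable() {
               @Override
               public void run() {
                  try {
                     barrier.await();               // wait until every worker is ready
                     while (run.get()) work.run();  // do work until told to stop
                     barrier.await();               // signal completion
                  } catch (Exception e) {
                     throw new RuntimeException(e);
                  }
               }
            });
         }
         barrier.await();                     // release the workers
         Thread.sleep(TimeUnit.SECONDS.toMillis(seconds));
         run.set(false);                      // ask the workers to stop
         barrier.await(2, TimeUnit.MINUTES);  // wait for them to finish
         exec.shutdownNow();
      }
   }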
@@ -0,0 +1,626 @@
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other
 * contributors as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.hibernate.test.cache.infinispan.stress;

import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.boot.registry.internal.StandardServiceRegistryImpl;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.mapping.Collection;
import org.hibernate.mapping.PersistentClass;
import org.hibernate.test.cache.infinispan.stress.entities.Address;
import org.hibernate.test.cache.infinispan.stress.entities.Family;
import org.hibernate.test.cache.infinispan.stress.entities.Person;
import org.hibernate.testing.ServiceRegistryBuilder;
import org.infinispan.util.concurrent.ConcurrentHashSet;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

import javax.transaction.TransactionManager;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.*;

import static org.infinispan.test.TestingUtil.withTx;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * Stress test for second level cache.
 *
 * TODO Various:
 * - Switch to a JDBC connection pool to avoid too many connections created
 * (as well as consuming memory, it's expensive to create)
 * - Use barrier associated execution tasks at the beginning and end to track
 * down start/end times for runs.
 *
 * @author Galder Zamarreño
 * @since 4.1
 */
@Ignore
public class SecondLevelCacheStressTestCase {

   static final int NUM_THREADS = 10;
   static final long WARMUP_TIME = TimeUnit.SECONDS.toNanos(Integer.getInteger("warmup-time", 1) * 5);
   static final long RUNNING_TIME = TimeUnit.SECONDS.toNanos(Integer.getInteger("time", 1) * 60);
   static final boolean PROFILE = Boolean.getBoolean("profile");
   static final boolean ALLOCATION = Boolean.getBoolean("allocation");
   static final int RUN_COUNT_LIMIT = Integer.getInteger("count", 1000); // max number of runs per operation
   static final Random RANDOM = new Random(12345);

   String provider;
   ConcurrentHashSet<Integer> updatedIds;
   Queue<Integer> removeIds;
   SessionFactory sessionFactory;
   TransactionManager tm;
   volatile int numEntities;

   @Before
   public void beforeClass() {
      provider = getProvider();

      updatedIds = new ConcurrentHashSet<Integer>();
      removeIds = new ConcurrentLinkedQueue<Integer>();

      Configuration cfg = new Configuration();
      cfg.setProperty(Environment.USE_SECOND_LEVEL_CACHE, "true");
      cfg.setProperty(Environment.USE_QUERY_CACHE, "true");
      configureCache(cfg);

      // Mappings
      configureMappings(cfg);

      // Database settings
      cfg.setProperty(Environment.DRIVER, "com.mysql.jdbc.Driver");
      cfg.setProperty(Environment.URL, "jdbc:mysql://localhost:3306/hibernate");
      cfg.setProperty(Environment.DIALECT, "org.hibernate.dialect.MySQL5InnoDBDialect");
      cfg.setProperty(Environment.USER, "root");
      cfg.setProperty(Environment.PASS, "password");

      // Create database schema in each run
      cfg.setProperty(Environment.HBM2DDL_AUTO, "create-drop");

      StandardServiceRegistryImpl registry =
            ServiceRegistryBuilder.buildServiceRegistry(cfg.getProperties());
      sessionFactory = cfg.buildSessionFactory(registry);

      tm = com.arjuna.ats.jta.TransactionManager.transactionManager();
   }

   protected String getProvider() {
      return "infinispan";
   }

   protected void configureCache(Configuration cfg) {
      cfg.setProperty(Environment.CACHE_REGION_FACTORY,
            "org.hibernate.cache.infinispan.InfinispanRegionFactory");
      cfg.setProperty(Environment.JTA_PLATFORM,
            "org.hibernate.service.jta.platform.internal.JBossStandAloneJtaPlatform");
      cfg.setProperty(InfinispanRegionFactory.INFINISPAN_CONFIG_RESOURCE_PROP,
            "stress-local-infinispan.xml");
   }

   @After
   public void afterClass() {
      sessionFactory.close();
   }

   @Test
   public void testEntityLifecycle() throws InterruptedException {
      if (!PROFILE) {
         System.out.printf("[provider=%s] Warming up\n", provider);
         doEntityLifecycle(true);

         // Recreate session factory cleaning everything
         afterClass();
         beforeClass();
      }

      System.out.printf("[provider=%s] Testing...\n", provider);
      doEntityLifecycle(false);
   }

   void doEntityLifecycle(boolean isWarmup) {
      long runningTimeout = isWarmup ? WARMUP_TIME : RUNNING_TIME;
      TotalStats insertPerf = runEntityInsert(runningTimeout);
      numEntities = countEntities().intValue();
      printResult(isWarmup, "[provider=%s] Inserts/s %10.2f (%d entities)\n",
            provider, insertPerf.getOpsPerSec("INSERT"), numEntities);

      TotalStats updatePerf = runEntityUpdate(runningTimeout);
      List<Integer> updateIdsSeq = new ArrayList<Integer>(updatedIds);
      printResult(isWarmup, "[provider=%s] Updates/s %10.2f (%d updates)\n",
            provider, updatePerf.getOpsPerSec("UPDATE"), updateIdsSeq.size());

      TotalStats findUpdatedPerf =
            runEntityFindUpdated(runningTimeout, updateIdsSeq);
      printResult(isWarmup, "[provider=%s] Updated finds/s %10.2f\n",
            provider, findUpdatedPerf.getOpsPerSec("FIND_UPDATED"));

      TotalStats findQueryPerf = runEntityFindQuery(runningTimeout, isWarmup);
      printResult(isWarmup, "[provider=%s] Query finds/s %10.2f\n",
            provider, findQueryPerf.getOpsPerSec("FIND_QUERY"));

      TotalStats findRandomPerf = runEntityFindRandom(runningTimeout);
      printResult(isWarmup, "[provider=%s] Random finds/s %10.2f\n",
            provider, findRandomPerf.getOpsPerSec("FIND_RANDOM"));

      // Get all entity ids
      List<Integer> entityIds = new ArrayList<Integer>();
      for (int i = 1; i <= numEntities; i++) entityIds.add(i);

      // Shuffle them
      Collections.shuffle(entityIds);

      // Add them to the queue delete consumption
      removeIds.addAll(entityIds);

      TotalStats deletePerf = runEntityDelete(runningTimeout);
      printResult(isWarmup, "[provider=%s] Deletes/s %10.2f\n",
            provider, deletePerf.getOpsPerSec("DELETE"));

      // TODO Print 2LC statistics...
   }

   static void printResult(boolean isWarmup, String format, Object... args) {
      if (!isWarmup) System.out.printf(format, args);
   }

   Long countEntities() {
      try {
         return withTx(tm, new Callable<Long>() {
            @Override
            public Long call() throws Exception {
               Session s = sessionFactory.openSession();
               Query query = s.createQuery("select count(*) from Family");
               Object result = query.list().get(0);
               s.close();
               return (Long) result;
            }
         });
      } catch (Exception e) {
         throw new RuntimeException(e);
      }
   }


   TotalStats runEntityInsert(long runningTimeout) {
      return runSingleWork(runningTimeout, "insert", insertOperation());
   }

   TotalStats runEntityUpdate(long runningTimeout) {
      return runSingleWork(runningTimeout, "update", updateOperation());
   }

   TotalStats runEntityFindUpdated(long runningTimeout,
         List<Integer> updatedIdsSeq) {
      return runSingleWork(runningTimeout, "find-updated", findUpdatedOperation(updatedIdsSeq));
   }

   TotalStats runEntityFindQuery(long runningTimeout, boolean warmup) {
      return runSingleWork(runningTimeout, "find-query", findQueryOperation(warmup));
   }

   TotalStats runEntityFindRandom(long runningTimeout) {
      return runSingleWork(runningTimeout, "find-random", findRandomOperation());
   }

   TotalStats runEntityDelete(long runningTimeout) {
      return runSingleWork(runningTimeout, "remove", deleteOperation());
   }

   TotalStats runSingleWork(long runningTimeout, final String name, Operation op) {
      final TotalStats perf = new TotalStats();

      ExecutorService exec = Executors.newFixedThreadPool(
            NUM_THREADS, new ThreadFactory() {
         volatile int i = 0;
         @Override
         public Thread newThread(Runnable r) {
            return new Thread(new ThreadGroup(name),
                  r, "worker-" + name + "-" + i++);
         }
      });

      try {
         List<Future<Void>> futures = new ArrayList<Future<Void>>(NUM_THREADS);
         CyclicBarrier barrier = new CyclicBarrier(NUM_THREADS + 1);

         for (int i = 0; i < NUM_THREADS; i++)
            futures.add(exec.submit(
                  new WorkerThread(runningTimeout, perf, op, barrier)));

         try {
            barrier.await(); // wait for all threads to be ready
            barrier.await(); // wait for all threads to finish

            // Now check whether anything went wrong...
            for (Future<Void> future : futures) future.get();
         } catch (Exception e) {
            throw new RuntimeException(e);
         }

         return perf;
      } finally {
         exec.shutdown();
      }
   }

   <T> T captureThrowables(Callable<T> task) throws Exception {
      try {
         return task.call();
      } catch (Throwable t) {
         t.printStackTrace();
         if (t instanceof Exception)
            throw (Exception) t;
         else
            throw new RuntimeException(t);
      }
   }

   Operation insertOperation() {
      return new Operation("INSERT") {
         @Override
         boolean call(final int run) throws Exception {
            return captureThrowables(new Callable<Boolean>() {
               @Override
               public Boolean call() throws Exception {
                  return withTx(tm, new Callable<Boolean>() {
                     @Override
                     public Boolean call() throws Exception {
                        Session s = sessionFactory.openSession();
                        s.getTransaction().begin();

                        String name = "Zamarreño-" + run;
                        Family family = new Family(name);
                        s.persist(family);

                        s.getTransaction().commit();
                        s.close();
                        return true;
                     }
                  });
               }
            });
         }
      };
   }

   Operation updateOperation() {
      return new Operation("UPDATE") {
         @Override
         boolean call(final int run) throws Exception {
            return captureThrowables(new Callable<Boolean>() {
               @Override
               public Boolean call() throws Exception {
                  return withTx(tm, new Callable<Boolean>() {
                     @Override
                     public Boolean call() throws Exception {
                        Session s = sessionFactory.openSession();
                        s.getTransaction().begin();

                        // Update random entity that has been inserted
                        int id = RANDOM.nextInt(numEntities) + 1;
                        Family family = (Family) s.load(Family.class, id);
                        String newSecondName = "Arrizabalaga-" + run;
                        family.setSecondName(newSecondName);

                        s.getTransaction().commit();
                        s.close();
                        // Cache updated entities for later read
                        updatedIds.add(id);
                        return true;
                     }
                  });
               }
            });
         }
      };
   }

   Operation findUpdatedOperation(final List<Integer> updatedIdsSeq) {
      return new Operation("FIND_UPDATED") {
         @Override
         boolean call(final int run) throws Exception {
            return captureThrowables(new Callable<Boolean>() {
               @Override
               public Boolean call() throws Exception {
                  Session s = sessionFactory.openSession();

                  int id = updatedIdsSeq.get(RANDOM.nextInt(
                        updatedIdsSeq.size()));
                  Family family = (Family) s.load(Family.class, id);
                  String secondName = family.getSecondName();
                  assertNotNull(secondName);
                  assertTrue("Second name not expected: " + secondName,
                        secondName.startsWith("Arrizabalaga"));

                  s.close();
                  return true;
               }
            });
         }
      };
   }

   private Operation findQueryOperation(final boolean isWarmup) {
      return new Operation("FIND_QUERY") {
         @Override
         boolean call(final int run) throws Exception {
            return captureThrowables(new Callable<Boolean>() {
               @Override
               public Boolean call() throws Exception {
                  Session s = sessionFactory.openSession();

                  Query query = s.createQuery("from Family")
                        .setCacheable(true);
                  int maxResults = isWarmup ? 10 : 100;
                  query.setMaxResults(maxResults);
                  List<Family> result = (List<Family>) query.list();
                  assertEquals(maxResults, result.size());

                  s.close();
                  return true;
               }
            });
         }
      };
   }

   private Operation findRandomOperation() {
      return new Operation("FIND_RANDOM") {
         @Override
         boolean call(final int run) throws Exception {
            return captureThrowables(new Callable<Boolean>() {
|
||||
@Override
|
||||
public Boolean call() throws Exception {
|
||||
Session s = sessionFactory.openSession();
|
||||
|
||||
int id = RANDOM.nextInt(numEntities) + 1;
|
||||
Family family = (Family) s.load(Family.class, id);
|
||||
String familyName = family.getName();
|
||||
// Skip ñ check in order to avoid issues...
|
||||
assertTrue("Unexpected family: " + familyName ,
|
||||
familyName.startsWith("Zamarre"));
|
||||
|
||||
s.close();
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private Operation deleteOperation() {
|
||||
return new Operation("DELETE") {
|
||||
@Override
|
||||
boolean call(final int run) throws Exception {
|
||||
return captureThrowables(new Callable<Boolean>() {
|
||||
@Override
|
||||
public Boolean call() throws Exception {
|
||||
return withTx(tm, new Callable<Boolean>() {
|
||||
@Override
|
||||
public Boolean call() throws Exception {
|
||||
Session s = sessionFactory.openSession();
|
||||
s.getTransaction().begin();
|
||||
|
||||
// Get each id and remove it
|
||||
int id = removeIds.poll();
|
||||
Family family = (Family) s.load(Family.class, id);
|
||||
String familyName = family.getName();
|
||||
// Skip ñ check in order to avoid issues...
|
||||
assertTrue("Unexpected family: " + familyName ,
|
||||
familyName.startsWith("Zamarre"));
|
||||
s.delete(family);
|
||||
|
||||
s.getTransaction().commit();
|
||||
s.close();
|
||||
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
public static Class<Object>[] getAnnotatedClasses() {
|
||||
return new Class[] {Family.class, Person.class, Address.class};
|
||||
}
|
||||
|
||||
private static void configureMappings(Configuration cfg) {
|
||||
Class<?>[] annotatedClasses = getAnnotatedClasses();
|
||||
if ( annotatedClasses != null ) {
|
||||
for ( Class<?> annotatedClass : annotatedClasses ) {
|
||||
cfg.addAnnotatedClass( annotatedClass );
|
||||
}
|
||||
}
|
||||
|
||||
cfg.buildMappings();
|
||||
Iterator it = cfg.getClassMappings();
|
||||
String cacheStrategy = "transactional";
|
||||
while (it.hasNext()) {
|
||||
PersistentClass clazz = (PersistentClass) it.next();
|
||||
if (!clazz.isInherited()) {
|
||||
cfg.setCacheConcurrencyStrategy(clazz.getEntityName(), cacheStrategy);
|
||||
}
|
||||
}
|
||||
it = cfg.getCollectionMappings();
|
||||
while (it.hasNext()) {
|
||||
Collection coll = (Collection) it.next();
|
||||
cfg.setCollectionCacheConcurrencyStrategy(coll.getRole(), cacheStrategy);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private static abstract class Operation {
|
||||
final String name;
|
||||
|
||||
Operation(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
abstract boolean call(int run) throws Exception;
|
||||
|
||||
}
|
||||
|
||||
private class WorkerThread implements Callable<Void> {
|
||||
private final long runningTimeout;
|
||||
private final TotalStats perf;
|
||||
private final Operation op;
|
||||
private final CyclicBarrier barrier;
|
||||
|
||||
public WorkerThread(long runningTimeout, TotalStats perf,
|
||||
Operation op, CyclicBarrier barrier) {
|
||||
this.runningTimeout = runningTimeout;
|
||||
this.perf = perf;
|
||||
this.op = op;
|
||||
this.barrier = barrier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
// TODO: Extend barrier to capture start time
|
||||
barrier.await();
|
||||
try {
|
||||
long startNanos = System.nanoTime();
|
||||
long endNanos = startNanos + runningTimeout;
|
||||
int runs = 0;
|
||||
long missCount = 0;
|
||||
while (callOperation(endNanos, runs)) {
|
||||
boolean hit = op.call(runs);
|
||||
if (!hit) missCount++;
|
||||
runs++;
|
||||
}
|
||||
|
||||
// TODO: Extend barrier to capture end time
|
||||
perf.addStats(op.name, runs,
|
||||
System.nanoTime() - startNanos, missCount);
|
||||
} finally {
|
||||
barrier.await();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private boolean callOperation(long endNanos, int runs) {
|
||||
if (ALLOCATION) {
|
||||
return runs < RUN_COUNT_LIMIT;
|
||||
} else {
|
||||
return (runs & 0x400) != 0 || System.nanoTime() < endNanos;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class TotalStats {
|
||||
private ConcurrentHashMap<String, OpStats> statsMap =
|
||||
new ConcurrentHashMap<String, OpStats>();
|
||||
|
||||
public void addStats(String opName, long opCount,
|
||||
long runningTime, long missCount) {
|
||||
OpStats s = new OpStats(opName, opCount, runningTime, missCount);
|
||||
OpStats old = statsMap.putIfAbsent(opName, s);
|
||||
boolean replaced = old == null;
|
||||
while (!replaced) {
|
||||
old = statsMap.get(opName);
|
||||
s = new OpStats(old, opCount, runningTime, missCount);
|
||||
replaced = statsMap.replace(opName, old, s);
|
||||
}
|
||||
}
|
||||
|
||||
public double getOpsPerSec(String opName) {
|
||||
OpStats s = statsMap.get(opName);
|
||||
if (s == null) return 0;
|
||||
return s.opCount * 1000000000. / s.runningTime * s.threadCount;
|
||||
}
|
||||
|
||||
public double getTotalOpsPerSec() {
|
||||
long totalOpCount = 0;
|
||||
long totalRunningTime = 0;
|
||||
long totalThreadCount = 0;
|
||||
for (Map.Entry<String, OpStats> e : statsMap.entrySet()) {
|
||||
OpStats s = e.getValue();
|
||||
totalOpCount += s.opCount;
|
||||
totalRunningTime += s.runningTime;
|
||||
totalThreadCount += s.threadCount;
|
||||
}
|
||||
return totalOpCount * 1000000000. / totalRunningTime * totalThreadCount;
|
||||
}
|
||||
|
||||
public double getHitRatio(String opName) {
|
||||
OpStats s = statsMap.get(opName);
|
||||
if (s == null) return 0;
|
||||
return 1 - 1. * s.missCount / s.opCount;
|
||||
}
|
||||
|
||||
public double getTotalHitRatio() {
|
||||
long totalOpCount = 0;
|
||||
long totalMissCount = 0;
|
||||
for (Map.Entry<String, OpStats> e : statsMap.entrySet()) {
|
||||
OpStats s = e.getValue();
|
||||
totalOpCount += s.opCount;
|
||||
totalMissCount += s.missCount;
|
||||
}
|
||||
return 1 - 1. * totalMissCount / totalOpCount;
|
||||
}
|
||||
}
|
||||
|
||||
private static class OpStats {
|
||||
public final String opName;
|
||||
public final int threadCount;
|
||||
public final long opCount;
|
||||
public final long runningTime;
|
||||
public final long missCount;
|
||||
|
||||
private OpStats(String opName, long opCount,
|
||||
long runningTime, long missCount) {
|
||||
this.opName = opName;
|
||||
this.threadCount = 1;
|
||||
this.opCount = opCount;
|
||||
this.runningTime = runningTime;
|
||||
this.missCount = missCount;
|
||||
}
|
||||
|
||||
private OpStats(OpStats base, long opCount,
|
||||
long runningTime, long missCount) {
|
||||
this.opName = base.opName;
|
||||
this.threadCount = base.threadCount + 1;
|
||||
this.opCount = base.opCount + opCount;
|
||||
this.runningTime = base.runningTime + runningTime;
|
||||
this.missCount = base.missCount + missCount;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
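Note: the withTx(tm, callable) helper used by the operations above is not part of this hunk; it lives elsewhere in the test sources, and tm is presumably a javax.transaction.TransactionManager. A minimal sketch of what such a helper is assumed to do (begin a JTA transaction, run the callable, commit, roll back on failure); names and body here are illustrative only, not the code this commit ships:

   // Illustrative sketch only; the real helper is defined in the test utilities.
   static <T> T withTx(javax.transaction.TransactionManager tm,
         java.util.concurrent.Callable<T> work) throws Exception {
      tm.begin();
      try {
         T result = work.call();
         tm.commit();    // completes the session work enlisted in the JTA transaction
         return result;
      } catch (Exception e) {
         tm.rollback();  // a failed operation must not leak an open transaction
         throw e;
      }
   }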
hibernate-infinispan/src/test/java/org/hibernate/test/cache/infinispan/stress/entities/Address.java (new file)
@@ -0,0 +1,216 @@
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other
 * contributors as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.hibernate.test.cache.infinispan.stress.entities;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

@Entity
public final class Address {

   @Id
   @GeneratedValue
   private int id;
   private int streetNumber;
   private String streetName;
   private String cityName;
   private String countryName;
   private String zipCode;
   @OneToMany
   private Set<Person> inhabitants;
   private int version;

   public Address(int streetNumber, String streetName, String cityName, String countryName) {
      this.streetNumber = streetNumber;
      this.streetName = streetName;
      this.cityName = cityName;
      this.countryName = countryName;
      this.zipCode = null;
      this.inhabitants = new HashSet<Person>();
      this.id = 0;
      this.version = 0;
   }

   protected Address() {
      this.streetNumber = 0;
      this.streetName = null;
      this.cityName = null;
      this.countryName = null;
      this.zipCode = null;
      this.inhabitants = new HashSet<Person>();
      this.id = 0;
      this.version = 0;
   }

   public int getStreetNumber() {
      return streetNumber;
   }

   public String getStreetName() {
      return streetName;
   }

   public String getCityName() {
      return cityName;
   }

   public String getCountryName() {
      return countryName;
   }

   public String getZipCode() {
      return zipCode;
   }

   public void setZipCode(String zipCode) {
      this.zipCode = zipCode;
   }

   public Set<Person> getInhabitants() {
      return inhabitants;
   }

   public boolean addInhabitant(Person inhabitant) {
      boolean done = false;
      if (inhabitants.add(inhabitant)) {
         inhabitant.setAddress(this);
         done = true;
      }
      return done;
   }

   public boolean remInhabitant(Person inhabitant) {
      boolean done = false;
      if (inhabitants.remove(inhabitant)) {
         inhabitant.setAddress(null);
         done = true;
      }
      return done;
   }

   public int getId() {
      return id;
   }

   public void setId(int id) {
      this.id = id;
   }

   public int getVersion() {
      return version;
   }

   protected void removeAllInhabitants() {
      // inhabitants relation is not CASCADED, we must delete the relation on other side by ourselves
      for (Iterator<Person> iterator = inhabitants.iterator(); iterator.hasNext(); ) {
         Person p = iterator.next();
         p.setAddress(null);
      }
   }

   protected void setStreetNumber(int streetNumber) {
      this.streetNumber = streetNumber;
   }

   protected void setStreetName(String streetName) {
      this.streetName = streetName;
   }

   protected void setCityName(String cityName) {
      this.cityName = cityName;
   }

   protected void setCountryName(String countryName) {
      this.countryName = countryName;
   }

   protected void setInhabitants(Set<Person> inhabitants) {
      if (inhabitants == null) {
         this.inhabitants = new HashSet<Person>();
      } else {
         this.inhabitants = inhabitants;
      }
   }

   protected void setVersion(Integer version) {
      this.version = version;
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;

      Address address = (Address) o;

      if (id != address.id) return false;
      if (streetNumber != address.streetNumber) return false;
      if (version != address.version) return false;
      if (cityName != null ? !cityName.equals(address.cityName) : address.cityName != null)
         return false;
      if (countryName != null ? !countryName.equals(address.countryName) : address.countryName != null)
         return false;
      if (inhabitants != null ? !inhabitants.equals(address.inhabitants) : address.inhabitants != null)
         return false;
      if (streetName != null ? !streetName.equals(address.streetName) : address.streetName != null)
         return false;
      if (zipCode != null ? !zipCode.equals(address.zipCode) : address.zipCode != null)
         return false;

      return true;
   }

   @Override
   public int hashCode() {
      int result = streetNumber;
      result = 31 * result + (streetName != null ? streetName.hashCode() : 0);
      result = 31 * result + (cityName != null ? cityName.hashCode() : 0);
      result = 31 * result + (countryName != null ? countryName.hashCode() : 0);
      result = 31 * result + (zipCode != null ? zipCode.hashCode() : 0);
      result = 31 * result + (inhabitants != null ? inhabitants.hashCode() : 0);
      result = 31 * result + id;
      result = 31 * result + version;
      return result;
   }

   @Override
   public String toString() {
      return "Address{" +
            "cityName='" + cityName + '\'' +
            ", streetNumber=" + streetNumber +
            ", streetName='" + streetName + '\'' +
            ", countryName='" + countryName + '\'' +
            ", zipCode='" + zipCode + '\'' +
            ", inhabitants=" + inhabitants +
            ", id=" + id +
            ", version=" + version +
            '}';
   }

}
hibernate-infinispan/src/test/java/org/hibernate/test/cache/infinispan/stress/entities/Family.java (new file)
@@ -0,0 +1,149 @@
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other
 * contributors as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.hibernate.test.cache.infinispan.stress.entities;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import java.util.HashSet;
import java.util.Set;

@Entity
public final class Family {

   @Id
   @GeneratedValue
   private int id;
   private String name;
   private String secondName;
   @OneToMany
   private Set<Person> members;
   private int version;

   public Family(String name) {
      this.name = name;
      this.secondName = null;
      this.members = new HashSet<Person>();
      this.id = 0;
      this.version = 0;
   }

   protected Family() {
      this.name = null;
      this.secondName = null;
      this.members = new HashSet<Person>();
      this.id = 0;
      this.version = 0;
   }

   public String getName() {
      return name;
   }

   public Set<Person> getMembers() {
      return members;
   }

   public String getSecondName() {
      return secondName;
   }

   public void setSecondName(String secondName) {
      this.secondName = secondName;
   }

   public int getId() {
      return id;
   }

   public void setId(int id) {
      this.id = id;
   }

   public int getVersion() {
      return version;
   }

   public void setName(String name) {
      this.name = name;
   }

   public void setMembers(Set<Person> members) {
      if (members == null) {
         this.members = new HashSet<Person>();
      } else {
         this.members = members;
      }
   }

   public void setVersion(Integer version) {
      this.version = version;
   }

   boolean addMember(Person member) {
      return members.add(member);
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;

      Family family = (Family) o;

      if (id != family.id) return false;
      if (version != family.version) return false;
      if (members != null ? !members.equals(family.members) : family.members != null)
         return false;
      if (name != null ? !name.equals(family.name) : family.name != null)
         return false;
      if (secondName != null ? !secondName.equals(family.secondName) : family.secondName != null)
         return false;

      return true;
   }

   @Override
   public int hashCode() {
      int result = name != null ? name.hashCode() : 0;
      result = 31 * result + (secondName != null ? secondName.hashCode() : 0);
      result = 31 * result + (members != null ? members.hashCode() : 0);
      result = 31 * result + id;
      result = 31 * result + version;
      return result;
   }

   @Override
   public String toString() {
      return "Family{" +
            "id=" + id +
            ", name='" + name + '\'' +
            ", secondName='" + secondName + '\'' +
            ", members=" + members +
            ", version=" + version +
            '}';
   }

}
hibernate-infinispan/src/test/java/org/hibernate/test/cache/infinispan/stress/entities/Person.java (new file)
@@ -0,0 +1,178 @@
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2012 Red Hat Inc. and/or its affiliates and other
 * contributors as indicated by the @author tags. All rights reserved.
 * See the copyright.txt in the distribution for a full listing of
 * individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */

package org.hibernate.test.cache.infinispan.stress.entities;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import java.util.Date;

@Entity
public class Person {

   @Id
   @GeneratedValue
   private int id;
   private String firstName;
   @ManyToOne
   private Family family;
   private Date birthDate;
   @ManyToOne
   private Address address;
   private boolean checked;
   private int version;

   public Person(String firstName, Family family) {
      this.firstName = firstName;
      this.family = family;
      this.birthDate = null;
      this.address = null;
      this.checked = false;
      this.id = 0;
      this.version = 0;
      this.family.addMember(this);
   }

   protected Person() {
      this.firstName = null;
      this.family = null;
      this.birthDate = null;
      this.address = null;
      this.checked = false;
      this.id = 0;
      this.version = 0;
   }

   public String getFirstName() {
      return firstName;
   }

   public Family getFamily() {
      return family;
   }

   public Date getBirthDate() {
      return birthDate;
   }

   public void setBirthDate(Date birthDate) {
      this.birthDate = birthDate;
   }

   public Address getAddress() {
      return address;
   }

   public void setAddress(Address address) {
      // To skip Hibernate BUG with access.PROPERTY : the rest should be done in DAO
      // this.address = address;
      // Hibernate BUG : if we update the relation on 2 sides
      if (this.address != address) {
         if (this.address != null) this.address.remInhabitant(this);
         this.address = address;
         if (this.address != null) this.address.addInhabitant(this);
      }
   }

   public boolean isChecked() {
      return checked;
   }

   public void setChecked(boolean checked) {
      this.checked = checked;
   }

   public int getId() {
      return id;
   }

   public void setId(int id) {
      this.id = id;
   }

   public int getVersion() {
      return version;
   }

   protected void setFirstName(String firstName) {
      this.firstName = firstName;
   }

   protected void setFamily(Family family) {
      this.family = family;
   }

   protected void setVersion(Integer version) {
      this.version = version;
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;

      Person person = (Person) o;

      if (checked != person.checked) return false;
      if (id != person.id) return false;
      if (version != person.version) return false;
      if (address != null ? !address.equals(person.address) : person.address != null)
         return false;
      if (birthDate != null ? !birthDate.equals(person.birthDate) : person.birthDate != null)
         return false;
      if (family != null ? !family.equals(person.family) : person.family != null)
         return false;
      if (firstName != null ? !firstName.equals(person.firstName) : person.firstName != null)
         return false;

      return true;
   }

   @Override
   public int hashCode() {
      int result = firstName != null ? firstName.hashCode() : 0;
      result = 31 * result + (family != null ? family.hashCode() : 0);
      result = 31 * result + (birthDate != null ? birthDate.hashCode() : 0);
      result = 31 * result + (address != null ? address.hashCode() : 0);
      result = 31 * result + (checked ? 1 : 0);
      result = 31 * result + id;
      result = 31 * result + version;
      return result;
   }

   @Override
   public String toString() {
      return "Person{" +
            "address=" + address +
            ", firstName='" + firstName + '\'' +
            ", family=" + family +
            ", birthDate=" + birthDate +
            ", checked=" + checked +
            ", id=" + id +
            ", version=" + version +
            '}';
   }

}
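Note: Person.setAddress above maintains both sides of the Person–Address association itself (see the comments about the property-access quirk). A small usage sketch built only from the accessors defined in these new entities; the literal values are illustrative:

   // Both sides of the association stay consistent via setAddress/addInhabitant.
   Family family = new Family("Zamarreño");
   Person person = new Person("Galder", family);            // constructor also registers the person with the family
   Address address = new Address(12, "Gran Vía", "Madrid", "Spain");

   person.setAddress(address);                               // also adds the person to address.getInhabitants()
   assert address.getInhabitants().contains(person);

   person.setAddress(null);                                  // removes the person from the previous address again
   assert address.getInhabitants().isEmpty();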
@@ -26,6 +26,7 @@ package org.hibernate.test.cache.infinispan.timestamp;
import java.util.Properties;

import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryActivated;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;

@@ -42,9 +43,6 @@ import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.impl.ClassLoaderAwareCache;
import org.hibernate.cache.infinispan.timestamp.TimestampsRegionImpl;
import org.hibernate.cache.infinispan.util.CacheAdapter;
import org.hibernate.cache.infinispan.util.CacheAdapterImpl;
import org.hibernate.cache.infinispan.util.FlagAdapter;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.Region;
import org.hibernate.cache.spi.UpdateTimestampsCache;

@@ -75,8 +73,8 @@ public class TimestampsRegionImplTestCase extends AbstractGeneralDataRegionTestCase
   }

   @Override
   protected CacheAdapter getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return CacheAdapterImpl.newInstance(regionFactory.getCacheManager().getCache("timestamps").getAdvancedCache());
   protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
      return regionFactory.getCacheManager().getCache("timestamps").getAdvancedCache();
   }

   public void testClearTimestampsRegionInIsolated() throws Exception {

@@ -108,7 +106,7 @@ public class TimestampsRegionImplTestCase extends AbstractGeneralDataRegionTestCase

      Account acct = new Account();
      acct.setAccountHolder(new AccountHolder());
      region.getCacheAdapter().withFlags(FlagAdapter.FORCE_SYNCHRONOUS).put(acct, "boo");
      region.getCache().withFlags(Flag.FORCE_SYNCHRONOUS).put(acct, "boo");

//      region.put(acct, "boo");
//
@@ -137,7 +137,8 @@ public class XaTransactionImpl implements Transaction {
      if (synchronizations != null) {
         for (int i = 0; i < synchronizations.size(); i++) {
            Synchronization s = (Synchronization) synchronizations.get(i);
            s.afterCompletion(status);
            if (s != null)
               s.afterCompletion(status);
         }
      }

@@ -9,4 +9,4 @@ log4j.logger.org.hibernate.test=info
log4j.logger.org.hibernate.cache=info

# SQL Logging - HHH-6833
log4j.logger.org.hibernate.SQL=debug
log4j.logger.org.hibernate.SQL=warn

@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>

<!-- Infinispan configuration based on the AS7 standalone, single node, set up -->
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xmlns="urn:infinispan:config:5.1"
            xsi:schemaLocation="urn:infinispan:config:5.1 http://www.infinispan.org/schemas/infinispan-config-5.1.xsd">
   <global>
      <!-- No JGroups transport -->
   </global>

   <default>
      <!-- Used to register JMX statistics in any available MBean server -->
      <jmxStatistics enabled="false"/>
   </default>

   <!-- Default configuration is appropriate for entity/collection caching. -->
   <namedCache name="entity">
      <locking isolationLevel="READ_COMMITTED" concurrencyLevel="1000"
               lockAcquisitionTimeout="15000" useLockStriping="false"/>
      <eviction maxEntries="140000" strategy="LRU"/>
      <expiration maxIdle="1200000" wakeUpInterval="60000"/>
      <!-- No lazy deserialization or store as binary for local caches -->
      <transaction transactionMode="TRANSACTIONAL" autoCommit="false"
                   lockingMode="OPTIMISTIC" useSynchronization="true">
         <recovery enabled="false"/>
      </transaction>
   </namedCache>

   <!-- A config appropriate for query caching. Does not replicate queries. -->
   <namedCache name="local-query">
      <locking isolationLevel="READ_COMMITTED" concurrencyLevel="1000"
               lockAcquisitionTimeout="15000" useLockStriping="false"/>
      <eviction maxEntries="140000" strategy="LRU"/>
      <expiration maxIdle="1200000" wakeUpInterval="60000"/>
      <transaction transactionMode="NON_TRANSACTIONAL" autoCommit="false" />
   </namedCache>

   <!-- Optimized for timestamp caching. -->
   <namedCache name="timestamps">
      <locking isolationLevel="READ_COMMITTED" concurrencyLevel="1000"
               lockAcquisitionTimeout="15000" useLockStriping="false"/>
      <eviction strategy="NONE"/>
      <expiration wakeUpInterval="0"/>
      <transaction transactionMode="NON_TRANSACTIONAL"/>
   </namedCache>

</infinispan>
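Note: a single-node Infinispan file like the one above is typically handed to the Infinispan region factory through the standard Hibernate settings. A hedged sketch of that wiring; the property names are the usual 2LC settings, but the resource name below is a placeholder, not a path defined by this commit:

   // Sketch only: standard Hibernate 2LC settings for the Infinispan region factory.
   // The classpath location of the XML above ("stress-local-infinispan.xml") is a placeholder.
   static org.hibernate.cfg.Configuration localSecondLevelCacheConfig() {
      org.hibernate.cfg.Configuration cfg = new org.hibernate.cfg.Configuration();
      cfg.setProperty("hibernate.cache.use_second_level_cache", "true");
      cfg.setProperty("hibernate.cache.use_query_cache", "true");
      cfg.setProperty("hibernate.cache.region.factory_class",
            "org.hibernate.cache.infinispan.InfinispanRegionFactory");
      cfg.setProperty("hibernate.cache.infinispan.cfg", "stress-local-infinispan.xml"); // placeholder resource name
      return cfg;
   }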